| prompt | completion | api |
|---|---|---|
| string lengths 19 to 1.03M | string lengths 4 to 2.12k | string lengths 8 to 90 |
import gpxpy
import pandas as pd
def path_to_gpx(path_to_tcx):
return path_to_tcx.split('.')[0] + '.gpx'
def get_workout_info(path_to_tcx):
"""Get name and type of a workout from its gpx file."""
path = path_to_gpx(path_to_tcx)
with open(path) as f:
gpx = gpxpy.parse(f)
# assert len(gpx.tracks) == 1, "This gpx file has 1+ tracks!!"
workout = gpx.tracks[0]
n, t = workout.name, workout.type
d = workout.description
return n, t, d
# nms_base = '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}'
# def get_workout_type(filepath):
# with open(filepath) as f:
# tcx = ET.parse(f)
# root = tcx.getroot()
# nsp = nms_base + 'Activities'
# out = root.find(nsp)[0].get('Sport')
# return out
def get_info_from_files(pseries):
"""Return a Pandas dataframe with name and type of each workout."""
dic = {'name': [], 'type': [], 'desc': []}
for _ in pseries:
n, t, d = get_workout_info(_)
dic['name'].append(n)
dic['type'].append(t)
dic['desc'].append(d)
    return pd.DataFrame(dic)
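# A minimal usage sketch (hypothetical paths; assumes each .tcx file has a matching
# .gpx file next to it, which is what path_to_gpx expects):
#   tcx_files = pd.Series(['activities/run_01.tcx', 'activities/ride_02.tcx'])
#   workouts_df = get_info_from_files(tcx_files)
#   # workouts_df has one row per file with 'name', 'type' and 'desc' columns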
# Copyright (c) 2021. <NAME>. All rights Reserved.
import numpy
import numpy as np
import pandas as pd
from bm.datamanipulation.AdjustDataFrame import remove_null_values
class DocumentProcessor:
custom_dtypes = []
model_types = []
def __init__(self):
self.custom_dtypes = ['int64', 'float64', 'datetime', 'string']
self.model_types = ['Prediction', 'Time Series Forecasting']
def document_analyzer(self, csv_file_location):
# Read the file
df = pd.read_csv(csv_file_location)
if (not df.empty) or (len(df.columns) < 2):
total_rows = len(df.index)
# list of columns data types
columns_list = df.columns
data_types = df.dtypes
extracted_data_types = []
datetime_columns = []
numeric_columns = []
for col in df.columns:
if df[col].dtype == 'object':
try:
df[col] = pd.to_datetime(df[col])
datetime_columns.append(col)
extracted_data_types.append('datetime')
except ValueError:
extracted_data_types.append('string')
pass
elif (df[col].dtype == 'float64' or df[col].dtype == 'int64'):
numeric_columns.append(col)
extracted_data_types.append(df[col].dtype.name)
else:
extracted_data_types.append('string')
# Check if there is any empty columns
df = df.replace(' ', np.nan)
nan_cols = []
for col in df.columns:
x = pd.isna(df[col])
"""Tests for misc module."""
import mock
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from numpy.testing import assert_almost_equal
import pytest
import causalimpact
standardize = causalimpact.misc.standardize_all_variables
unstandardize = causalimpact.misc.unstandardize
df_print = causalimpact.misc.df_print
def test_basic_standardize():
pre_period = [0, 2]
post_period = [3, 4]
data = {
'c1': [1, 4, 8, 9, 10],
'c2': [4, 8, 12, 16, 20]
}
data = pd.DataFrame(data)
result = standardize(data, pre_period, post_period)
assert_almost_equal(
np.zeros((2)),
np.mean(result['data_pre'].values, axis=0)
)
assert_almost_equal(
np.ones((2)),
np.std(result['data_pre'].values, axis=0)
)
assert len(result['data_pre']) == pre_period[-1] + 1
def test_standardize_returns_expected_types():
pre_period = [0, 4]
post_period = [5, 5]
data = [-1, 0.1, 1, 2, np.nan, 3]
data = pd.DataFrame(data)
from __future__ import absolute_import, print_function
from builtins import object, str
import copy, numpy, pandas, pyarrow as pa, sys, uuid
from .pygraphistry import PyGraphistry
from .pygraphistry import util
from .pygraphistry import bolt_util
from .nodexlistry import NodeXLGraphistry
from .tigeristry import Tigeristry
from .arrow_uploader import ArrowUploader
maybe_cudf = None
try:
import cudf
maybe_cudf = cudf
except ImportError:
1
class Plotter(object):
"""Graph plotting class.
Created using ``Graphistry.bind()``.
Chained calls successively add data and visual encodings, and end with a plot call.
To streamline reuse and replayable notebooks, Plotter manipulations are immutable. Each chained call returns a new instance that derives from the previous one. The old plotter or the new one can then be used to create different graphs.
The class supports convenience methods for mixing calls across Pandas, NetworkX, and IGraph.
"""
_defaultNodeId = '__nodeid__'
def __init__(self):
# Bindings
self._edges = None
self._nodes = None
self._source = None
self._destination = None
self._node = None
self._edge_title = None
self._edge_label = None
self._edge_color = None
self._edge_source_color = None
self._edge_destination_color = None
self._edge_size = None
self._edge_weight = None
self._edge_icon = None
self._edge_opacity = None
self._point_title = None
self._point_label = None
self._point_color = None
self._point_size = None
self._point_weight = None
self._point_icon = None
self._point_opacity = None
self._point_x = None
self._point_y = None
# Settings
self._height = 500
self._render = True
self._url_params = {'info': 'true'}
# Metadata
self._name = None
self._description = None
# Integrations
self._bolt_driver = None
self._tigergraph = None
def __repr__(self):
bindings = ['edges', 'nodes', 'source', 'destination', 'node',
'edge_label', 'edge_color', 'edge_size', 'edge_weight', 'edge_title', 'edge_icon', 'edge_opacity',
'edge_source_color', 'edge_destination_color',
'point_label', 'point_color', 'point_size', 'point_weight', 'point_title', 'point_icon', 'point_opacity',
'point_x', 'point_y']
settings = ['height', 'url_params']
rep = {'bindings': dict([(f, getattr(self, '_' + f)) for f in bindings]),
'settings': dict([(f, getattr(self, '_' + f)) for f in settings])}
if util.in_ipython():
from IPython.lib.pretty import pretty
return pretty(rep)
else:
return str(rep)
def bind(self, source=None, destination=None, node=None,
edge_title=None, edge_label=None, edge_color=None, edge_weight=None, edge_size=None, edge_opacity=None, edge_icon=None,
edge_source_color=None, edge_destination_color=None,
point_title=None, point_label=None, point_color=None, point_weight=None, point_size=None, point_opacity=None, point_icon=None,
point_x=None, point_y=None):
"""Relate data attributes to graph structure and visual representation.
To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not affect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs.
:param source: Attribute containing an edge's source ID
:type source: String.
:param destination: Attribute containing an edge's destination ID
:type destination: String.
:param node: Attribute containing a node's ID
:type node: String.
:param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used.
:type edge_title: HtmlString.
:param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings.
:type edge_label: HtmlString.
:param edge_color: Attribute overriding edge's color. rgba (int64) or int32 palette index, see palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type edge_color: int32 | int64.
:param edge_source_color: Attribute overriding edge's source color if no edge_color, as an rgba int64 value.
:type edge_source_color: int64.
:param edge_destination_color: Attribute overriding edge's destination color if no edge_color, as an rgba int64 value.
:type edge_destination_color: int64.
:param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value.
:type edge_weight: String.
:param point_title: Attribute overriding node's minimized label text. By default, the node ID is used.
:type point_title: HtmlString.
:param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings.
:type point_label: HtmlString.
:param point_color: Attribute overriding node's color. rgba (int64) or int32 palette index, see palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer.
:type point_color: int32 | int64.
:param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom.
:type point_size: HtmlString.
:param point_x: Attribute overriding node's initial x position. Combine with ".settings(url_params={'play': 0}))" to create a custom layout
:type point_x: number.
:param point_y: Attribute overriding node's initial y position. Combine with ".settings(url_params={'play': 0}))" to create a custom layout
:type point_y: number.
:returns: Plotter.
:rtype: Plotter.
**Example: Minimal**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst')
**Example: Node colors**
::
import graphistry
g = graphistry.bind()
g = g.bind(source='src', destination='dst',
node='id', point_color='color')
**Example: Chaining**
::
import graphistry
g = graphistry.bind(source='src', destination='dst', node='id')
g1 = g.bind(point_color='color1', point_size='size1')
g.bind(point_color='color1b')
g2a = g1.bind(point_color='color2a')
g2b = g1.bind(point_color='color2b', point_size='size2b')
g3a = g2a.bind(point_size='size3a')
g3b = g2b.bind(point_size='size3b')
In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to:
::
g: default/default
g1: color1/size1
g2a: color2a/size1
g2b: color2b/size2b
g3a: color2a/size3a
g3b: color2b/size3b
"""
res = copy.copy(self)
res._source = source or self._source
res._destination = destination or self._destination
res._node = node or self._node
res._edge_title = edge_title or self._edge_title
res._edge_label = edge_label or self._edge_label
res._edge_color = edge_color or self._edge_color
res._edge_source_color = edge_source_color or self._edge_source_color
res._edge_destination_color = edge_destination_color or self._edge_destination_color
res._edge_size = edge_size or self._edge_size
res._edge_weight = edge_weight or self._edge_weight
res._edge_icon = edge_icon or self._edge_icon
res._edge_opacity = edge_opacity or self._edge_opacity
res._point_title = point_title or self._point_title
res._point_label = point_label or self._point_label
res._point_color = point_color or self._point_color
res._point_size = point_size or self._point_size
res._point_weight = point_weight or self._point_weight
res._point_opacity = point_opacity or self._point_opacity
res._point_icon = point_icon or self._point_icon
res._point_x = point_x or self._point_x
res._point_y = point_y or self._point_y
return res
def nodes(self, nodes):
"""Specify the set of nodes and associated data.
Must include any nodes referenced in the edge list.
:param nodes: Nodes and their attributes.
:type nodes: Pandas dataframe
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = graphistry
.bind(source='src', destination='dst')
.edges(es)
vs = pandas.DataFrame({'v': [0,1,2], 'lbl': ['a', 'b', 'c']})
g = g.bind(node='v').nodes(vs)
g.plot()
"""
res = copy.copy(self)
res._nodes = nodes
return res
def name(self, name):
"""Upload name
:param name: Upload name
:type name: str"""
res = copy.copy(self)
res._name = name
return res
def description(self, description):
"""Upload description
:param description: Upload description
:type description: str"""
res = copy.copy(self)
res._description = description
return res
def edges(self, edges):
"""Specify edge list data and associated edge attribute values.
:param edges: Edges and their attributes.
:type edges: Pandas dataframe, NetworkX graph, or IGraph graph.
:returns: Plotter.
:rtype: Plotter.
**Example**
::
import graphistry
df = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(df)
.plot()
"""
res = copy.copy(self)
res._edges = edges
return res
def graph(self, ig):
"""Specify the node and edge data.
:param ig: Graph with node and edge attributes.
:type ig: NetworkX graph or an IGraph graph.
:returns: Plotter.
:rtype: Plotter.
"""
res = copy.copy(self)
res._edges = ig
res._nodes = None
return res
def settings(self, height=None, url_params={}, render=None):
"""Specify iframe height and add URL parameter dictionary.
The library takes care of URI component encoding for the dictionary.
:param height: Height in pixels.
:type height: Integer.
:param url_params: Dictionary of querystring parameters to append to the URL.
:type url_params: Dictionary
:param render: Whether to render the visualization using the native notebook environment (default True), or return the visualization URL
:type render: Boolean
"""
res = copy.copy(self)
res._height = height or self._height
res._url_params = dict(self._url_params, **url_params)
res._render = self._render if render == None else render
return res
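# For instance (values are illustrative, not defaults): a taller iframe with the layout
# animation controlled via the 'play' querystring parameter mentioned in the
# point_x/point_y docs above:
#   g = graphistry.bind(source='src', destination='dst').settings(
#       height=800, url_params={'play': 0})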
def plot(self, graph=None, nodes=None, name=None, description=None, render=None, skip_upload=False):
"""Upload data to the Graphistry server and show as an iframe of it.
Uses the currently bound schema structure and visual encodings.
Optional parameters override the current bindings.
When used in a notebook environment, will also show an iframe of the visualization.
:param graph: Edge table or graph.
:type graph: Pandas dataframe, NetworkX graph, or IGraph graph.
:param nodes: Nodes table.
:type nodes: Pandas dataframe.
:param name: Upload name.
:type name: Optional str.
:param description: Upload description.
:type description: Optional str.
:param render: Whether to render the visualization using the native notebook environment (default True), or return the visualization URL
:type render: Boolean
:param skip_upload: Return node/edge/bindings that would have been uploaded. By default, upload happens.
:type skip_upload: Boolean.
**Example: Simple**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.edges(es)
.plot()
**Example: Shorthand**
::
import graphistry
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
graphistry
.bind(source='src', destination='dst')
.plot(es)
"""
if graph is None:
if self._edges is None:
util.error('Graph/edges must be specified.')
g = self._edges
else:
g = graph
n = self._nodes if nodes is None else nodes
name = name or self._name or ("Untitled " + util.random_string(10))
description = description or self._description or ("")
self._check_mandatory_bindings(not isinstance(n, type(None)))
api_version = PyGraphistry.api_version()
if api_version == 1:
dataset = self._plot_dispatch(g, n, name, description, 'json')
if skip_upload:
return dataset
info = PyGraphistry._etl1(dataset)
elif api_version == 2:
dataset = self._plot_dispatch(g, n, name, description, 'vgraph')
if skip_upload:
return dataset
info = PyGraphistry._etl2(dataset)
elif api_version == 3:
PyGraphistry.refresh()
dataset = self._plot_dispatch(g, n, name, description, 'arrow')
if skip_upload:
return dataset
#fresh
dataset.token = PyGraphistry.api_token()
dataset.post()
info = {
'name': dataset.dataset_id,
'type': 'arrow',
'viztoken': str(uuid.uuid4())
}
viz_url = PyGraphistry._viz_url(info, self._url_params)
cfg_client_protocol_hostname = PyGraphistry._config['client_protocol_hostname']
full_url = ('%s:%s' % (PyGraphistry._config['protocol'], viz_url)) if cfg_client_protocol_hostname is None else viz_url
if render == False or (render == None and not self._render):
return full_url
elif util.in_ipython():
from IPython.core.display import HTML
return HTML(util.make_iframe(full_url, self._height))
else:
import webbrowser
webbrowser.open(full_url)
return full_url
def pandas2igraph(self, edges, directed=True):
"""Convert a pandas edge dataframe to an IGraph graph.
Uses current bindings. Defaults to treating edges as directed.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst')
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
g.bind(point_color='community').plot(ig)
"""
import igraph
self._check_mandatory_bindings(False)
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
self._node = self._node or Plotter._defaultNodeId
eattribs = edges.columns.values.tolist()
eattribs.remove(self._source)
eattribs.remove(self._destination)
cols = [self._source, self._destination] + eattribs
etuples = [tuple(x) for x in edges[cols].values]
return igraph.Graph.TupleList(etuples, directed=directed, edge_attrs=eattribs,
vertex_name_attr=self._node)
def igraph2pandas(self, ig):
"""Under current bindings, transform an IGraph into a pandas edges dataframe and a nodes dataframe.
**Example**
::
import graphistry
g = graphistry.bind()
es = pandas.DataFrame({'src': [0,1,2], 'dst': [1,2,0]})
g = g.bind(source='src', destination='dst').edges(es)
ig = g.pandas2igraph(es)
ig.vs['community'] = ig.community_infomap().membership
(es2, vs2) = g.igraph2pandas(ig)
g.nodes(vs2).bind(point_color='community').plot()
"""
def get_edgelist(ig):
idmap = dict(enumerate(ig.vs[self._node]))
for e in ig.es:
t = e.tuple
yield dict({self._source: idmap[t[0]], self._destination: idmap[t[1]]},
**e.attributes())
self._check_mandatory_bindings(False)
if self._node is None:
ig.vs[Plotter._defaultNodeId] = [v.index for v in ig.vs]
self._node = Plotter._defaultNodeId
elif self._node not in ig.vs.attributes():
util.error('Vertex attribute "%s" bound to "node" does not exist.' % self._node)
edata = get_edgelist(ig)
ndata = [v.attributes() for v in ig.vs]
nodes = pandas.DataFrame(ndata, columns=ig.vs.attributes())
cols = [self._source, self._destination] + ig.es.attributes()
edges = pandas.DataFrame(edata, columns=cols)
return (edges, nodes)
def networkx_checkoverlap(self, g):
import networkx as nx
[x, y] = [int(x) for x in nx.__version__.split('.')]
vattribs = None
if x == 1:
vattribs = g.nodes(data=True)[0][1] if g.number_of_nodes() > 0 else []
else:
vattribs = g.nodes(data=True) if g.number_of_nodes() > 0 else []
if not (self._node is None) and self._node in vattribs:
util.error('Vertex attribute "%s" already exists.' % self._node)
def networkx2pandas(self, g):
def get_nodelist(g):
for n in g.nodes(data=True):
yield dict({self._node: n[0]}, **n[1])
def get_edgelist(g):
for e in g.edges(data=True):
yield dict({self._source: e[0], self._destination: e[1]}, **e[2])
self._check_mandatory_bindings(False)
self.networkx_checkoverlap(g)
self._node = self._node or Plotter._defaultNodeId
nodes = pandas.DataFrame(get_nodelist(g))
edges = pandas.DataFrame(get_edgelist(g))
return (edges, nodes)
def _check_mandatory_bindings(self, node_required):
if self._source is None or self._destination is None:
util.error('Both "source" and "destination" must be bound before plotting.')
if node_required and self._node is None:
util.error('Node identifier must be bound when using node dataframe.')
def _check_bound_attribs(self, df, attribs, typ):
cols = df.columns.values.tolist()
for a in attribs:
b = getattr(self, '_' + a)
if b not in cols:
util.error('%s attribute "%s" bound to "%s" does not exist.' % (typ, a, b))
def _plot_dispatch(self, graph, nodes, name, description, mode='json'):
if isinstance(graph, pandas.core.frame.DataFrame) \
or isinstance(graph, pa.Table) \
or ( not (maybe_cudf is None) and isinstance(graph, maybe_cudf.DataFrame) ):
return self._make_dataset(graph, nodes, name, description, mode)
try:
import igraph
if isinstance(graph, igraph.Graph):
(e, n) = self.igraph2pandas(graph)
return self._make_dataset(e, n, name, description, mode)
except ImportError:
pass
try:
import networkx
if isinstance(graph, networkx.classes.graph.Graph) or \
isinstance(graph, networkx.classes.digraph.DiGraph) or \
isinstance(graph, networkx.classes.multigraph.MultiGraph) or \
isinstance(graph, networkx.classes.multidigraph.MultiDiGraph):
(e, n) = self.networkx2pandas(graph)
return self._make_dataset(e, n, name, description, mode)
except ImportError:
pass
util.error('Expected Pandas/Arrow/cuDF dataframe(s) or Igraph/NetworkX graph.')
# Sanitize node/edge dataframe by
# - dropping indices
# - dropping edges with NAs in source or destination
# - dropping nodes with NAs in nodeid
# - creating a default node table if none was provided.
# - inferring numeric types of all columns containing numpy objects
def _sanitize_dataset(self, edges, nodes, nodeid):
self._check_bound_attribs(edges, ['source', 'destination'], 'Edge')
elist = edges.reset_index(drop=True) \
.dropna(subset=[self._source, self._destination])
obj_df = elist.select_dtypes(include=[numpy.object_])
elist[obj_df.columns] = obj_df.apply(pandas.to_numeric, errors='ignore')
if nodes is None:
nodes = pandas.DataFrame()
nodes[nodeid] = pandas.concat([edges[self._source], edges[self._destination]],
ignore_index=True).drop_duplicates()
else:
self._check_bound_attribs(nodes, ['node'], 'Vertex')
nlist = nodes.reset_index(drop=True) \
.dropna(subset=[nodeid]) \
.drop_duplicates(subset=[nodeid])
obj_df = nlist.select_dtypes(include=[numpy.object_])
nlist[obj_df.columns] = obj_df.apply(pandas.to_numeric, errors='ignore')
return (elist, nlist)
def _check_dataset_size(self, elist, nlist):
edge_count = len(elist.index)
node_count = len(nlist.index)
graph_size = edge_count + node_count
if edge_count > 8e6:
util.error('Maximum number of edges (8M) exceeded: %d.' % edge_count)
if node_count > 8e6:
util.error('Maximum number of nodes (8M) exceeded: %d.' % node_count)
if graph_size > 1e6:
util.warn('Large graph: |nodes| + |edges| = %d. Layout/rendering might be slow.' % graph_size)
# Bind attributes for ETL1 by creating a copy of the designated column renamed
# with magic names understood by ETL1 (eg. pointColor, etc)
def _bind_attributes_v1(self, edges, nodes):
def bind(df, pbname, attrib, default=None):
bound = getattr(self, attrib)
if bound:
if bound in df.columns.tolist():
df[pbname] = df[bound]
else:
util.warn('Attribute "%s" bound to %s does not exist.' % (bound, attrib))
elif default:
df[pbname] = df[default]
nodeid = self._node or Plotter._defaultNodeId
(elist, nlist) = self._sanitize_dataset(edges, nodes, nodeid)
self._check_dataset_size(elist, nlist)
bind(elist, 'edgeColor', '_edge_color')
bind(elist, 'edgeSourceColor', '_edge_source_color')
bind(elist, 'edgeDestinationColor', '_edge_destination_color')
bind(elist, 'edgeLabel', '_edge_label')
bind(elist, 'edgeTitle', '_edge_title')
bind(elist, 'edgeSize', '_edge_size')
bind(elist, 'edgeWeight', '_edge_weight')
bind(elist, 'edgeOpacity', '_edge_opacity')
bind(elist, 'edgeIcon', '_edge_icon')
bind(nlist, 'pointColor', '_point_color')
bind(nlist, 'pointLabel', '_point_label')
bind(nlist, 'pointTitle', '_point_title', nodeid)
bind(nlist, 'pointSize', '_point_size')
bind(nlist, 'pointWeight', '_point_weight')
bind(nlist, 'pointOpacity', '_point_opacity')
bind(nlist, 'pointIcon', '_point_icon')
bind(nlist, 'pointX', '_point_x')
bind(nlist, 'pointY', '_point_y')
return (elist, nlist)
# Bind attributes for ETL2 by an encodings map storing the visual semantic of
# each bound column.
def _bind_attributes_v2(self, edges, nodes):
def bind(enc, df, pbname, attrib, default=None):
bound = getattr(self, attrib)
if bound:
if bound in df.columns.tolist():
enc[pbname] = {'attributes' : [bound]}
else:
util.warn('Attribute "%s" bound to %s does not exist.' % (bound, attrib))
elif default:
enc[pbname] = {'attributes': [default]}
nodeid = self._node or Plotter._defaultNodeId
(elist, nlist) = self._sanitize_dataset(edges, nodes, nodeid)
self._check_dataset_size(elist, nlist)
edge_encodings = {
'source': {'attributes' : [self._source]},
'destination': {'attributes': [self._destination]},
}
node_encodings = {
'nodeId': {'attributes': [nodeid]}
}
bind(edge_encodings, elist, 'edgeColor', '_edge_color')
bind(edge_encodings, elist, 'edgeSourceColor', '_edge_source_color')
bind(edge_encodings, elist, 'edgeDestinationColor', '_edge_destination_color')
bind(edge_encodings, elist, 'edgeLabel', '_edge_label')
bind(edge_encodings, elist, 'edgeTitle', '_edge_title')
bind(edge_encodings, elist, 'edgeSize', '_edge_size')
bind(edge_encodings, elist, 'edgeWeight', '_edge_weight')
bind(edge_encodings, elist, 'edgeOpacity', '_edge_opacity')
bind(edge_encodings, elist, 'edgeIcon', '_edge_icon')
bind(node_encodings, nlist, 'pointColor', '_point_color')
bind(node_encodings, nlist, 'pointLabel', '_point_label')
bind(node_encodings, nlist, 'pointTitle', '_point_title', nodeid)
bind(node_encodings, nlist, 'pointSize', '_point_size')
bind(node_encodings, nlist, 'pointWeight', '_point_weight')
bind(node_encodings, nlist, 'pointOpacity', '_point_opacity')
bind(node_encodings, nlist, 'pointIcon', '_point_icon')
bind(node_encodings, nlist, 'pointX', '_point_x')
bind(node_encodings, nlist, 'pointY', '_point_y')
encodings = {
'nodes': node_encodings,
'edges': edge_encodings
}
return (elist, nlist, encodings)
def _table_to_pandas(self, table) -> pandas.DataFrame:
if table is None:
return table
if isinstance(table, pandas.DataFrame):
return table
if isinstance(table, pa.Table):
return table.to_pandas()
if not (maybe_cudf is None) and isinstance(table, maybe_cudf.DataFrame):
return table.to_pandas()
raise Exception('Unknown type %s: Could not convert data to Pandas dataframe' % str(type(table)))
def _table_to_arrow(self, table) -> pa.Table:
if table is None:
return table
if isinstance(table, pa.Table):
return table
if isinstance(table, pandas.DataFrame):
return pa.Table.from_pandas(table, preserve_index=False).replace_schema_metadata({})
if not (maybe_cudf is None) and isinstance(table, maybe_cudf.DataFrame):
return table.to_arrow()
raise Exception('Unknown type %s: Could not convert data to Arrow' % str(type(table)))
def _make_dataset(self, edges, nodes, name, description, mode):
try:
if len(edges) == 0:
util.warn('Graph has no edges, may have rendering issues')
except:
1
if mode == 'json':
edges_df = self._table_to_pandas(edges)
nodes_df = self._table_to_pandas(nodes)
return self._make_json_dataset(edges_df, nodes_df, name)
elif mode == 'vgraph':
edges_df = self._table_to_pandas(edges)
nodes_df = self._table_to_pandas(nodes)
return self._make_vgraph_dataset(edges_df, nodes_df, name)
elif mode == 'arrow':
edges_arr = self._table_to_arrow(edges)
nodes_arr = self._table_to_arrow(nodes)
return self._make_arrow_dataset(edges_arr, nodes_arr, name, description)
#token=None, dataset_id=None, url_params = None)
else:
raise ValueError('Unknown mode: ' + mode)
# Main helper for creating ETL1 payload
def _make_json_dataset(self, edges, nodes, name):
(elist, nlist) = self._bind_attributes_v1(edges, nodes)
edict = elist.where((pandas.notnull(elist)), None).to_dict(orient='records')
bindings = {'idField': self._node or Plotter._defaultNodeId,
'destinationField': self._destination, 'sourceField': self._source}
dataset = {'name': PyGraphistry._config['dataset_prefix'] + name,
'bindings': bindings, 'type': 'edgelist', 'graph': edict}
if nlist is not None:
ndict = nlist.where((pandas.notnull(nlist)), None).to_dict(orient='records')
dataset['labels'] = ndict
return dataset
# Main helper for creating ETL2 payload
def _make_vgraph_dataset(self, edges, nodes, name):
from . import vgraph
(elist, nlist, encodings) = self._bind_attributes_v2(edges, nodes)
nodeid = self._node or Plotter._defaultNodeId
sources = elist[self._source]
dests = elist[self._destination]
elist.drop([self._source, self._destination], axis=1, inplace=True)
# Filter out nodes which have no edges
lnodes = pandas.concat([sources, dests], ignore_index=True).unique()
lnodes_df = pandas.DataFrame(lnodes, columns=[nodeid])
"""Prepare feature data from Universal Dependencies and UniMorph datasets.
We need to know the feature values of each word in BERT's vocab. For the
multilingual model, we want to know the feature values for all the languages it
models.
This module is intended to be run as a script:
$ python src/features.py
"""
import os
from glob import glob
import pandas as pd
import pyconll
from constants import LANGUAGES, NA
from filenames import FEATURES_DIR, UNIMORPH_DIR, UNIVERSAL_DEPENDENCIES_DIR
from utils import refresh
COLS = ["lemma", "word", "pos", "number", "gender", "case", "person"]
# Preparing features from UniMorph
def make_mapper(mapping): # noqa: D202
"""Return function that maps between values in `mapping` if present.
The Universal Dependencies and UniMorph data use different annotation
schemas. We need to convert one schema to another. There's a great project
by <NAME> on converting from the Universal Dependencies schema to
the UniMorph one, but another library I rely on (pyconll) needs them in
the original schema. I use the Universal Dependencies data in more places
than the UniMorph data, so in my case it makes more sense to convert from
UniMorph to Universal Dependencies.
Given a set of UniMorph feature values, the returned function looks for
values that it can map, but returns NA if it doesn't find any.
Parameters
----------
mapping : dict(str : str)
Mapping from UniMorph to UD feature values
Returns
-------
function
"""
def func(features):
for feature in features:
if feature in mapping:
return mapping[feature]
return NA
return func
# mappings from UniMorph feature values to Universal Dependencies ones
# these are the only POS that we are interested in for this project
POS_MAPPING = {
"V": "VERB",
"V.PTCP": "VERB",
"N": "NOUN",
"PRO": "PRON",
"ADJ": "ADJ",
"ART": "DET",
"DET": "DET",
"AUX": "AUX",
}
NUMBER_MAPPING = {"SG": "Sing", "PL": "Plur"}
GENDER_MAPPING = {"MASC": "Masc", "FEM": "Fem", "NEUT": "Neut"}
# we restrict our attention to the core case values
CASE_MAPPING = {"NOM": "Nom", "ACC": "Acc", "ERG": "Erg", "ABS": "Abs"}
PERSON_MAPPING = {"1": "1", "2": "2", "3": "3"}
map_pos = make_mapper(POS_MAPPING)
map_number = make_mapper(NUMBER_MAPPING)
map_gender = make_mapper(GENDER_MAPPING)
map_case = make_mapper(CASE_MAPPING)
map_person = make_mapper(PERSON_MAPPING)
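# For example, given a UniMorph feature bundle these mappers return the UD value,
# falling back to NA when no mapped value is present:
#   map_pos({'V', 'PST'})      # -> 'VERB'
#   map_number({'N', 'PL'})    # -> 'Plur'
#   map_case({'N', 'INS'})     # -> NA (instrumental is not one of the core cases above)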
def prepare_um(language):
"""Prepare word feature values from `language` from UniMorph data.
Some of this function was borrowed from <NAME>'s marry.py
https://github.com/unimorph/ud-compatibility
Parameters
----------
language : str
Name of language
Returns
-------
pd.DataFrame
Contains columns for word form, pos, number, gender, case and person
"""
code = LANGUAGES[language]
file_name = os.path.join(UNIMORPH_DIR, code, code)
result = []
with open(file_name) as file:
for line in file:
if line.split():
lemma, inflected, features = line.strip().split("\t")
features = set(features.split(";"))
data = {"word": inflected, "lemma": lemma}
data["pos"] = map_pos(features)
data["number"] = map_number(features)
data["gender"] = map_gender(features)
data["case"] = map_case(features)
data["person"] = map_person(features)
result.append(data)
return pd.DataFrame(result)
# Preparing features from UD
POSSIBLE_FEATURE_VALUES = set(
list(NUMBER_MAPPING.values())
+ list(GENDER_MAPPING.values())
+ list(CASE_MAPPING.values())
+ list(PERSON_MAPPING.values())
)
def feature_value(token, feature):
"""Return the value of `feature` in `token`.
The token may not have a value for the feature, either because the
language doesn't mark that feature on this kind of token, or because
the annotation is missing. In this case we return NA.
Parameters
----------
token : pyconll Token
feature : str
Returns
-------
str
"""
feature = feature.title()
try:
value = str(next(iter(token.feats[feature])))
if value in POSSIBLE_FEATURE_VALUES:
return value
return NA
except KeyError:
return NA
def prepare_one_ud_file(fname):
"""Prepare feature values from `fname` of Universal Dependencies data.
We look at every token in this file. If the token's POS is one that we care
about for this project, we extract its feature values.
Parameters
----------
fname : str
Returns
-------
pd.DataFrame
Contains columns for word form, pos, number, gender, case and person
"""
conll = pyconll.iter_from_file(fname)
result = []
pos_of_interest = set(POS_MAPPING.values())
for sentence in conll:
for token in sentence:
pos = token.upos
if pos in pos_of_interest:
data = {"word": token.form, "pos": pos, "lemma": token.lemma}
for feature in ["number", "gender", "case", "person"]:
data[feature] = feature_value(token, feature)
result.append(data)
    return pd.DataFrame(result)
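# Usage sketch (hypothetical language and path; assumes 'German' is a key of LANGUAGES
# and that the UD/UniMorph directories from filenames.py are populated):
#   um_df = prepare_um('German')
#   ud_df = prepare_one_ud_file('ud/UD_German-GSD/de_gsd-ud-train.conllu')
#   # both frames carry the fields listed in COLS: lemma, word, pos, number, gender, case, person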
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/1/26 13:10
Desc: Shenwan (申万) indexes: level-1, level-2 and level-3 industry indexes
http://www.swsindex.com/IdxMain.aspx
https://legulegu.com/stockdata/index-composition?industryCode=851921.SI
"""
import time
import json
import pandas as pd
from akshare.utils import demjson
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import sw_headers, sw_payload, sw_url
def sw_index_representation_spot() -> pd.DataFrame:
"""
Shenwan market-representation indexes: real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8831
:return: real-time quotes of the market-representation indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
params = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801001','801002','801003','801005','801300','801901','801903','801905','801250','801260','801270','801280','802613')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "9",
"timed": "1632300641756",
}
r = requests.get(url, params=params)
data_json = demjson.decode(r.text)
temp_df = pd.DataFrame(data_json["root"])
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
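# Usage sketch: this endpoint takes no arguments, e.g.
#   spot_df = sw_index_representation_spot()
#   print(spot_df[["指数代码", "指数名称", "最新价"]].head())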
def sw_index_spot() -> pd.DataFrame:
"""
Shenwan level-1 industry indexes: real-time quotes
http://www.swsindex.com/idx0120.aspx?columnid=8832
:return: real-time quotes of Shenwan level-1 industry indexes
:rtype: pandas.DataFrame
"""
url = "http://www.swsindex.com/handler.aspx"
result = []
for i in range(1, 3):
payload = sw_payload.copy()
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
return temp_df
def sw_index_second_spot() -> pd.DataFrame:
"""
Shenwan level-2 industry indexes: real-time quotes
http://www.swsindex.com/idx0120.aspx?columnId=8833
:return: real-time quotes of Shenwan level-2 industry indexes
:rtype: pandas.DataFrame
"""
result = []
for i in range(1, 6):
payload = {
"tablename": "swzs",
"key": "L1",
"p": "1",
"where": "L1 in('801011','801012','801013','801014','801015','801016','801021','801022','801023','801032','801033','801034','801035','801036','801037','801041','801051','801072','801073','801074','801075','801081','801082','801083','801084','801092','801093','801094','801101','801102','801111','801112','801123','801131','801132','801141','801142','801143','801151','801152','801153','801154','801155','801156','801161','801162','801163','801164','801171','801172','801173','801174','801175','801176','801177','801178','801181','801182','801191','801192','801193','801194','801202','801211','801212','801213','801214','801222','801223','801053','801054','801055','801076','801203','801204','801205','801711','801712','801713','801721','801722','801723','801724','801725','801731','801732','801733','801734','801741','801742','801743','801744','801751','801752','801761','801881','801017','801018')",
"orderby": "",
"fieldlist": "L1,L2,L3,L4,L5,L6,L7,L8,L11",
"pagecount": "98",
"timed": "",
}
payload.update({"p": i})
payload.update({"timed": int(time.time() * 1000)})
r = requests.post(sw_url, headers=sw_headers, data=payload)
data = r.content.decode()
data = data.replace("'", '"')
data = json.loads(data)
result.extend(data["root"])
temp_df = pd.DataFrame(result)
temp_df["L2"] = temp_df["L2"].str.strip()
temp_df.columns = ["指数代码", "指数名称", "昨收盘", "今开盘", "成交额", "最高价", "最低价", "最新价", "成交量"]
temp_df["昨收盘"] = pd.to_numeric(temp_df["昨收盘"])
temp_df["今开盘"] = pd.to_numeric(temp_df["今开盘"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["最高价"] = pd.to_numeric(temp_df["最高价"])
temp_df["最低价"] = pd.to_numeric(temp_df["最低价"])
temp_df["最新价"] = pd.to_ | numeric(temp_df["最新价"]) | pandas.to_numeric |
from pathlib import Path
import numpy as np
import pandas as pd
import pandas.testing as tm
import pytest
from tableauhyperapi import Connection, CreateMode, HyperProcess, TableName, Telemetry
import pantab
import pantab._compat as compat
def assert_roundtrip_equal(result, expected):
"""Compat helper for comparing round-tripped results."""
if compat.PANDAS_100:
expected["object"] = expected["object"].astype("string")
expected["non-ascii"] = expected["non-ascii"].astype("string")
tm.assert_frame_equal(result, expected)
def test_basic(df, tmp_hyper, table_name, table_mode):
# Write twice; depending on mode this should either overwrite or duplicate entries
pantab.frame_to_hyper(df, tmp_hyper, table=table_name, table_mode=table_mode)
pantab.frame_to_hyper(df, tmp_hyper, table=table_name, table_mode=table_mode)
result = pantab.frame_from_hyper(tmp_hyper, table=table_name)
expected = df.copy()
expected["float32"] = expected["float32"].astype(np.float64)
expected["Float32"] = expected["Float32"].astype(np.float64)
expected["Float64"] = expected["Float64"].astype(np.float64)
if table_mode == "a":
expected = pd.concat([expected, expected]).reset_index(drop=True)
assert_roundtrip_equal(result, expected)
def test_use_float_na_flag(df, tmp_hyper, table_name):
pantab.frame_to_hyper(df, tmp_hyper, table=table_name)
result = pantab.frame_from_hyper(tmp_hyper, table=table_name, use_float_na=False)
expected = df.copy()
expected["float32"] = expected["float32"].astype(np.float64)
expected["Float32"] = expected["Float32"].astype(np.float64)
expected["Float64"] = expected["Float64"].astype(np.float64)
assert_roundtrip_equal(result, expected)
result = pantab.frame_from_hyper(tmp_hyper, table=table_name, use_float_na=True)
expected = df.copy()
expected["float32"] = expected["float32"].astype("Float64")
expected["float64"] = expected["float64"].astype("Float64")
expected["float32_limits"] = expected["float32_limits"].astype("Float64")
expected["float64_limits"] = expected["float64_limits"].astype("Float64")
expected["Float32"] = expected["Float32"].astype("Float64")
assert_roundtrip_equal(result, expected)
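# The round-trip pattern these tests exercise, as a minimal sketch (assumes a DataFrame
# `df` and a writable path; file and table names here are hypothetical):
#   pantab.frame_to_hyper(df, "example.hyper", table="animals")
#   result = pantab.frame_from_hyper("example.hyper", table="animals")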
def test_multiple_tables(df, tmp_hyper, table_name, table_mode):
# Write twice; depending on mode this should either overwrite or duplicate entries
pantab.frames_to_hyper(
{table_name: df, "table2": df}, tmp_hyper, table_mode=table_mode
)
pantab.frames_to_hyper(
{table_name: df, "table2": df}, tmp_hyper, table_mode=table_mode
)
result = pantab.frames_from_hyper(tmp_hyper)
expected = df.copy()
expected["float32"] = expected["float32"].astype(np.float64)
expected["Float32"] = expected["Float32"].astype(np.float64)
expected["Float64"] = expected["Float64"].astype(np.float64)
if table_mode == "a":
expected = pd.concat([expected, expected])
######
# Author: <NAME>
# this file loads and organizes
# Foundation data for further use
######
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
from datetime import datetime,timedelta
from tqdm import tqdm
import matplotlib.gridspec as gridspec
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import copy
# function to load the data
def load_data():
print("loading data...")
    art_metadata_df = pd.read_csv("data/nft_metadata.csv")
# ----------------------------------------------------------------------------
# Name: Read/Write/Helper functions for HDF5 based CML data format cmlH5
# Purpose:
#
# Authors:
#
# Created:
# Copyright: (c) <NAME> 2016
# Licence: The MIT License
# ----------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from builtins import zip
import numpy as np
import pandas as pd
import h5py
from copy import deepcopy
from warnings import warn
from collections import OrderedDict
from pycomlink import Comlink, ComlinkChannel
CMLH5_VERSION = 0.2
cml_metadata_dict = {
'cml_id': {'mandatory': True, 'type': str},
'site_a_latitude': {'mandatory': True, 'type': float},
'site_a_longitude': {'mandatory': True, 'type': float},
'site_b_latitude': {'mandatory': True, 'type': float},
'site_b_longitude': {'mandatory': True, 'type': float},
}
cml_ch_metadata_dict = {
'frequency': {'mandatory': True, 'type': float,
'min': 0.1e9, 'max': 100e9},
'polarization': {'mandatory': True, 'type': str,
'options': ['H', 'V', 'h', 'v']},
'channel_id': {'mandatory': True, 'type': str},
'atpc': {'mandatory': False, 'type': str,
'options': ['on', 'off']}
}
cml_ch_time_dict = {
'mandatory': True,
'quantity': 'Timestamp',
'units': 'seconds since 1970-01-01 00:00:00',
'calendar': 'proleptic_gregorian'
}
cml_ch_data_names_dict_tx_rx = {
'rx': {'mandatory': False,
'quantity': 'Received signal level',
'units': 'dBm'},
'tx': {'mandatory': False,
'quantity': 'Trasmitted signal level',
'units': 'dBm'},
'time': cml_ch_time_dict
}
cml_ch_data_names_dict_tx_rx_min_max = {
'rx_min': {'mandatory': False,
'quantity': 'Minimum received signal level',
'units': 'dBm'},
'rx_max': {'mandatory': False,
'quantity': 'Maximum received signal level',
'units': 'dBm'},
'tx_min': {'mandatory': False,
'quantity': 'Minimum trasmitted signal level',
'units': 'dBm'},
'tx_max': {'mandatory': False,
'quantity': 'Maximum trasmitted signal level',
'units': 'dBm'},
'time': cml_ch_time_dict
}
#########################
# Functions for writing #
#########################
def write_to_cmlh5(cml_list, fn,
t_start=None, t_stop=None,
column_names_to_write=None,
split_to_multiple_files=False, splitting_period='D',
append_date_str_to_fn='_%Y%m%d',
write_all_data=False,
product_keys=None, product_names=None, product_units=None,
compression='gzip', compression_opts=4):
"""
Parameters
----------
cml_list
fn
t_start
t_stop
column_names_to_write
split_to_multiple_files
splitting_period
append_date_str_to_fn
write_all_data
product_keys
product_names
product_units
compression
compression_opts
Returns
-------
"""
# Check and prepare `product_keys`, `product_names` and `product_units`
if product_keys is not None:
if type(product_keys) == str:
product_keys = [product_keys]
strings_are_supplied = True
else:
strings_are_supplied = False
if product_names is None:
product_names = product_keys
else:
if type(product_names) == str:
if not strings_are_supplied:
raise AttributeError('`product_keys` was supplied as list,'
' so must be `product_names`')
product_names = [product_names]
if product_units is None:
raise AttributeError('Units must be supplied for the products')
else:
if type(product_units) == str:
if not strings_are_supplied:
raise AttributeError('`product_keys` was supplied as list,'
' so must be `product_units`')
product_units = [product_units]
if (t_start is None) and (t_stop is None):
t_start, t_stop = (
_get_first_and_last_timestamp_in_cml_list(cml_list))
if split_to_multiple_files:
t_in_file_start_list = pd.date_range(
start=t_start,
end=t_stop,
freq=splitting_period,
normalize=True)
t_in_file_stop_list = pd.date_range(
start=t_start + pd.Timedelta(1, splitting_period),
end=t_stop + pd.Timedelta(1, splitting_period),
freq=splitting_period,
normalize=True)
include_t_stop_in_file = False
else:
t_in_file_start_list = [t_start, ]
t_in_file_stop_list = [t_stop, ]
include_t_stop_in_file = True
# Write to file(s)
for i, (t_in_file_start, t_in_file_stop) in \
enumerate(zip(t_in_file_start_list, t_in_file_stop_list)):
if t_start > t_in_file_start:
t_in_file_start = t_start
if t_stop < t_in_file_stop:
t_in_file_stop = t_stop
include_t_stop_in_file = True
if split_to_multiple_files:
try:
fn_body, fn_ending = fn.split('.')
except:
raise ValueError('file name must contain a `.`, '
'e.g. `my_cml_file.h5`')
if append_date_str_to_fn:
fn_i = (fn_body +
t_in_file_start.strftime(append_date_str_to_fn) +
'.' + fn_ending)
else:
fn_i = '%s_%d.%s' % (fn_body, i, fn_ending)
else:
fn_i = fn
with h5py.File(fn_i, mode='w') as h5file:
h5file.attrs['file_format'] = 'cmlH5'
h5file.attrs['file_format_version'] = CMLH5_VERSION
h5file.attrs['time_coverage_start'] = t_in_file_start.strftime(
'%Y-%m-%dT%H:%M:%SZ')
h5file.attrs['time_coverage_stop'] = t_in_file_stop.strftime(
'%Y-%m-%dT%H:%M:%SZ')
for i_cml, cml in enumerate(cml_list):
# Create CML HDF5 group
cml_g = h5file.create_group(cml.metadata['cml_id'])
# Write CML attributes
_write_cml_attributes(cml_g, cml)
# Write CML channels
for i_channel, channel_id in enumerate(cml.channels.keys()):
cml_ch = cml.channels[channel_id]
chan_g = cml_g.create_group('channel_%d' % (i_channel + 1))
_write_channel_attributes(chan_g, cml_ch)
_write_channel_data(
chan_g=chan_g,
cml_ch=cml_ch,
t_start=t_in_file_start,
t_stop=t_in_file_stop,
column_names_to_write=column_names_to_write,
include_t_stop=include_t_stop_in_file,
compression=compression,
compression_opts=compression_opts,
write_all_data=write_all_data)
# Write CML derived products like rain rate for each CML
if product_keys is not None:
for i_prod, (product_key, product_name, product_unit) in \
enumerate(zip(
product_keys,
product_names,
product_units)):
prod_g = cml_g.create_group('product_%d' % i_prod)
_write_product(prod_g, cml, product_key,
product_name, product_unit,
compression, compression_opts)
def _get_first_and_last_timestamp_in_cml_list(cml_list):
"""
Parameters
----------
cml_list
Returns
-------
"""
t_min = (
min([min([cml_ch.data.index.min()
for cml_ch in cml.channels.values()])
for cml in cml_list]))
t_max = (
max([max([cml_ch.data.index.max()
for cml_ch in cml.channels.values()])
for cml in cml_list]))
return t_min, t_max
def _write_cml_attributes(cml_g, cml):
"""
cml_g : HDF5 group at CML level
cml : pycomlink.Comlink object
"""
for attr_name, attr_options in cml_metadata_dict.items():
cml_g.attrs[attr_name] = cml.metadata[attr_name]
def _write_channel_attributes(chan_g, cml_ch):
"""
chan_g : HDF5 group at CML-channel level
cml : pycomlink.Comlink object
"""
for attr_name, attr_options in cml_ch_metadata_dict.items():
attr_value = cml_ch.metadata[attr_name]
if attr_value is None:
if attr_options['mandatory']:
warn('\n The mandatory attribute `%s` is `None`'
'\n Using fill_value instead' % attr_name)
chan_g.attrs[attr_name] = _missing_attribute(attr_options['type'])
else:
chan_g.attrs[attr_name] = attr_value
def _write_channel_data(chan_g,
cml_ch,
t_start,
t_stop,
column_names_to_write,
compression,
compression_opts,
include_t_stop=True,
write_all_data=False):
"""
Parameters
----------
chan_g
cml_ch
t_start
t_stop
column_names_to_write
compression
compression_opts
include_t_stop
write_all_data
Returns
-------
"""
# Build a dictionary with the info for the standard data that is to be
# written to a cmlh5 file, i.e. `tx` and `rx`, or `rx_min`, `rx_max`,...
_cml_ch_data_names_dict = {'time': cml_ch_time_dict}
for column_name in cml_ch.data.columns:
try:
_cml_ch_data_names_dict[column_name] = (
cml_ch_data_names_dict_tx_rx[column_name])
except KeyError:
pass
try:
_cml_ch_data_names_dict[column_name] = (
cml_ch_data_names_dict_tx_rx_min_max[column_name])
except KeyError:
pass
if (write_all_data is False) and (column_names_to_write is None):
pass
# If desired, add the rest of the columns, but without specific info
# on the data like units or a descriptive name
elif (write_all_data is True) and (column_names_to_write is None):
for column_name in cml_ch.data.columns:
if column_name not in list(_cml_ch_data_names_dict.keys()):
_cml_ch_data_names_dict[column_name] = {}
# Or add additional columns according to the info passed as argument
elif (write_all_data is False) and (column_names_to_write is not None):
if isinstance(column_names_to_write, dict):
for key, value in column_names_to_write.iteritems():
_cml_ch_data_names_dict[key] = value
elif isinstance(column_names_to_write, list):
for column_name in column_names_to_write:
_cml_ch_data_names_dict[column_name] = {}
else:
raise AttributeError('`column_names_to_write` must be either a list'
'or a dict')
else:
raise AttributeError('`write_all_data cannot be True if '
'`columns_names_to_write` is provided '
'and not None')
# Get the time index in UTC
ts_t = cml_ch.data.index.tz_convert('UTC')
if include_t_stop:
t_slice_ix = (ts_t >= t_start) & (ts_t <= t_stop)
else:
t_slice_ix = (ts_t >= t_start) & (ts_t < t_stop)
# write variables
for name, attrs in _cml_ch_data_names_dict.items():
if name == 'time':
# Transform the pandas (np.datetime64) which is in ns to seconds
t_vec = ts_t.astype('int64') / 1e9
chan_g.create_dataset(name,
data=t_vec[t_slice_ix],
compression=compression,
compression_opts=compression_opts)
elif name in cml_ch.data.columns:
chan_g.create_dataset(name,
data=cml_ch.data[name].values[t_slice_ix],
compression=compression,
compression_opts=compression_opts)
else:
print('`%s` not found in ComlinkChannel.data.columns' % name)
for attr_name, attr_value in attrs.items():
chan_g[name].attrs[attr_name] = attr_value
# Create time scale
chan_g['time'].dims.create_scale(chan_g['time'], 'time')
# Link all other datasets to the time scale
for name in list(_cml_ch_data_names_dict.keys()):
if name in cml_ch.data.columns:
if not name == 'time':
chan_g[name].dims[0].attach_scale(chan_g['time'])
def _write_product(prod_g, cml,
product_key, product_name, product_unit,
compression, compression_opts):
"""
@param prod_g:
@param cml:
@param product_key:
@param product_name:
@param product_unit:
@param compression:
@param compression_opts:
@return:
"""
# TODO: Make it possible to save product from different channels
# Choose the first channel (since there is no other solution now for how
# to deal with the "products" for each channel of one CML
cml_ch = cml.channel_1
ts_t = cml_ch.data.index.tz_convert('UTC')
# Transform the pandas (np.datetime64) which is in ns to seconds
t_vec = ts_t.astype('int64') / 1e9
prod_g.create_dataset('time',
data=t_vec,
compression=compression,
compression_opts=compression_opts)
prod_g.create_dataset(product_name,
data=cml_ch.data[product_key].values,
compression=compression,
compression_opts=compression_opts)
# Create time scale
prod_g['time'].dims.create_scale(prod_g['time'], 'time')
prod_g['time'].attrs['units'] = 'seconds since 1970-01-01 00:00:00'
prod_g['time'].attrs['calendar'] = 'proleptic_gregorian'
prod_g['time'].attrs['quantity'] = 'Timestamp'
prod_g[product_name].attrs['units'] = product_unit
prod_g[product_name].attrs['quantity'] = product_name
# Link all other datasets to the time scale
if not product_name == 'time':
prod_g[product_name].dims[0].attach_scale(prod_g['time'])
def _missing_attribute(attr_type):
if attr_type == float:
fill_value = np.nan
elif attr_type == int:
fill_value = -9999
elif attr_type == str:
fill_value = 'NA'
else:
raise AttributeError('Could not infer `missing_value` for '
'`attr_type` %s' % attr_type)
return fill_value
#########################
# Functions for reading #
#########################
def read_from_cmlh5(fn,
cml_id_list=None,
t_start=None,
t_stop=None,
column_names_to_read=None,
read_all_data=False):
"""
Parameters
----------
fn
cml_id_list
t_start
t_stop
column_names_to_read
read_all_data
Returns
-------
"""
h5_reader = h5py.File(fn, mode='r')
cml_list = []
for cml_g_name in h5_reader['/']:
cml_g = h5_reader['/' + cml_g_name]
cml = _read_one_cml(cml_g=cml_g,
cml_id_list=cml_id_list,
t_start=t_start,
t_stop=t_stop,
column_names_to_read=column_names_to_read,
read_all_data=read_all_data)
if cml is not None:
cml_list.append(cml)
print('%d CMLs read in' % len(cml_list))
return cml_list
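# Usage sketch (hypothetical file name and CML IDs):
#   cml_list = read_from_cmlh5('cml_data.h5', cml_id_list=['cml_1', 'cml_2'])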
def read_from_multiple_cmlh5(fn_list,
cml_id_list=None,
t_start=None,
t_stop=None,
sort_fn_list=True):
"""
Parameters
----------
fn_list
cml_id_list
t_start
t_stop
sort_fn_list
Returns
-------
"""
if sort_fn_list:
fn_list.sort()
fn_list_selected = []
# Find the files where data is stored for the specified period
if (t_start is not None) and (t_stop is not None):
# loop through all files to find their temporal coverage
t_start = pd.to_datetime(t_start)
t_stop = pd.to_datetime(t_stop)
for fn in fn_list:
with h5py.File(fn, mode='r') as h5_reader:
# update fn_list so that only necessary files are contained
time_coverage_start = pd.to_datetime(
h5_reader.attrs['time_coverage_start'])
time_coverage_stop = pd.to_datetime(
h5_reader.attrs['time_coverage_stop'])
if ((time_coverage_start < t_stop) and
(time_coverage_stop > t_start)):
fn_list_selected.append(fn)
# If no start and stop data has been provided, just use fn_list
elif (t_start is None) and (t_stop is None):
fn_list_selected = fn_list
else:
raise ValueError('`t_start` and `t_stop` must both be either `None` '
'or some timestamp information.')
# Loop over cmlh5 files and read them in
cml_lists = []
for fn in fn_list_selected:
cml_lists.append(read_from_cmlh5(fn=fn,
cml_id_list=cml_id_list,
t_start=t_start,
t_stop=t_stop))
# Concat data for the Comlink objects
cml_dict = OrderedDict()
for cml_list in cml_lists:
for cml in cml_list:
cml_id = cml.metadata['cml_id']
if cml_id in list(cml_dict.keys()):
cml_dict[cml_id].append_data(cml)
else:
cml_dict[cml_id] = cml
return list(cml_dict.values())
def _read_one_cml(cml_g,
cml_id_list=None,
t_start=None,
t_stop=None,
column_names_to_read=None,
read_all_data=False):
"""
Parameters
----------
cml_g
cml_id_list
t_start
t_stop
column_names_to_read
read_all_data
Returns
-------
"""
metadata = _read_cml_metadata(cml_g)
if cml_id_list is not None:
if metadata['cml_id'] not in cml_id_list:
return None
cml_ch_list = []
for cml_ch_name, cml_ch_g in list(cml_g.items()):
if 'channel_' in cml_ch_name:
cml_ch_list.append(
_read_cml_channel(
cml_ch_g=cml_ch_g,
t_start=t_start,
t_stop=t_stop,
column_names_to_read=column_names_to_read,
read_all_data=read_all_data))
# TODO: Handle `auxiliary_N` and `product_N` cml_g-subgroups
return Comlink(channels=cml_ch_list, metadata=metadata)
def _read_cml_metadata(cml_g):
"""
@param cml_g:
@return:
"""
metadata = {}
for attr_name, attr_options in cml_metadata_dict.items():
value = cml_g.attrs[attr_name]
# TODO: Handle NaN values
metadata[attr_name] = value
return metadata
def _read_cml_channel_metadata(cml_ch_g):
"""
@param cml_ch_g:
@return:
"""
metadata = {}
for attr_name, attr_options in cml_ch_metadata_dict.items():
value = cml_ch_g.attrs[attr_name]
# This is necessary because of this h5py issue
# https://github.com/h5py/h5py/issues/379
if isinstance(value, bytes):
value = value.decode("utf-8")
# TODO: Handle NaN values
metadata[attr_name] = value
return metadata
def _read_cml_channel_data(cml_ch_g,
t_start=None,
t_stop=None,
column_names_to_read=None,
read_all_data=None):
"""
Parameters
----------
cml_ch_g
t_start
t_stop
column_names_to_read
read_all_data
Returns
-------
"""
if (read_all_data is False) and (column_names_to_read is None):
_cml_ch_data_names_list = list(set(
list(cml_ch_data_names_dict_tx_rx.keys()) +
list(cml_ch_data_names_dict_tx_rx_min_max.keys())))
elif (read_all_data is True) and (column_names_to_read is None):
_cml_ch_data_names_list = list(cml_ch_g.keys())
elif (read_all_data is False) and (column_names_to_read is not None):
if isinstance(column_names_to_read, list):
_cml_ch_data_names_list = column_names_to_read
else:
raise AttributeError('`column_names_to_write` must be either a list'
'or a dict')
else:
raise AttributeError('`read_all_data cannot be True if '
'`columns_names_to_read` is provided '
'and not None')
data_dict = {}
for name in _cml_ch_data_names_list:
try:
data_dict[name] = cml_ch_g[name]
except KeyError:
pass
if len(list(data_dict.keys())) == 0:
print('Warning: No dataset matching the column names %s '
'found in cml_ch_g' % str(_cml_ch_data_names_list))
# Time is stored in seconds since epoch and is represented in pandas by
# np.datetime64 in nanoseconds
t = (data_dict.pop('time')[:] * 1e9).astype('datetime64[ns]')
df = pd.DataFrame(index=t, data=data_dict)
# -*- coding: utf-8 -*-
"""
Web scraping and crawling utilities
"""
import numpy as np
import time
import uuid
import sys
from mllib.utils import seleniumutil as util
import re
import lxml.html
import pandas as pd
from lxml import etree
from urllib.request import urlopen, Request
import requests
from io import StringIO
from mllib.utils import config_vars as CONFIG
import random
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
# Nested query: for nested html elements, extract all descendant text at once and return one big string
from selenium.common.exceptions import WebDriverException
def scroll_mouse(driver):
try:
js1 = "window.scrollTo(0,250)"
js2 = "window.scrollTo(250,0)"
js3 = "window.scrollTo(0,document.body.scrollHeight)"
js_window_height = driver.execute_script('return document.body.scrollHeight')
driver.execute_script(js1)
time.sleep(1)
driver.execute_script(js2)
time.sleep(1)
driver.execute_script(js3)
time.sleep(1)
except WebDriverException:
print('Failed to scroll the page')
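# Example (sketch; `driver` is an already initialised selenium webdriver):
# scroll_mouse(driver)  # scroll down to trigger lazy-loaded content before parsing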
def get_all_children_elements_chrome(element):
result = ''
all_infos = util.find_element_by_xpath(element, './descendant-or-self::node()/text()')
for s in all_infos:
#print('type(s)', type(s))
#print('s', s)
result = result + ' ' + s.strip()
#print('result: ', result)
return result
def get_all_children_elements(element):
result = ''
all_infos = element[0].xpath('./descendant-or-self::node()/text()')
for s in all_infos:
#print('type(s)', type(s))
#print('s', s)
result = result + ' ' + s.strip()
#print('result: ', result)
return result
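# Example (sketch; `tree` is an lxml tree and the XPath is illustrative only):
# text = get_all_children_elements(tree.xpath('//div[@class="info-panel"]'))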
# Sina Finance data
def get_sina_finance_data(retry_count = 3, pause = 0.01, dataArr=pd.DataFrame(), pageNo=1, endPage=3):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(CONFIG.SINA_URL%(pageNo), headers=CONFIG.HEADERS)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath("//table[@id=\"dataTable\"]/tr")
sarr = [etree.tostring(node).decode('utf-8') for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
df = pd.read_html(sarr)[0]
df.columns = CONFIG.SINA_COLUMNS
dataArr = dataArr.append(df, ignore_index=True)
#a[last()]/@onclick
nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
if len(nextPage)>0 and int(pageNo) < endPage:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return get_sina_finance_data(retry_count, pause, dataArr, pageNo=pageNo)
else:
return dataArr
except Exception as e:
print(e)
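# Example (sketch; assumes CONFIG.SINA_URL, CONFIG.HEADERS and CONFIG.SINA_COLUMNS
# are configured and the network is reachable):
# df = get_sina_finance_data(retry_count=3, pause=0.5, endPage=2)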
# Lianjia (rental listings) data
def get_lianjia_rent_data(retry_count = 3, pause = 0.01, dataArr=[], pageNo=1, endPage=3):
for _ in range(retry_count):
time.sleep(pause)
try:
request_1 = Request(CONFIG.LIANJIA_URL%(pageNo))
text_1 = urlopen(request_1, timeout=10).read()
text_1 = text_1.decode('utf-8')
html_1 = lxml.html.parse(StringIO(text_1))
res_1 = html_1.xpath("//*[@id=\"house-lst\"]/li/div[@class=\"info-panel\"]")
links_1 = html_1.xpath("//*[@id=\"house-lst\"]/li/div[@class=\"info-panel\"]/h2/a/@href")
for link in links_1:
request_2 = Request(link)
text_2 = urlopen(request_2, timeout=10).read()
text_2 = text_2.decode('utf-8')
html_2 = lxml.html.parse(StringIO(text_2))
_price = html_2.xpath("//div[@class=\"price \"]/span[@class=\"total\"]/text()")
_area = html_2.xpath("//div[@class=\"zf-room\"]/p[1]/text()")
_house_type = html_2.xpath("//div[@class=\"zf-room\"]/p[2]/text()")
_stair_level=html_2.xpath("//div[@class=\"zf-room\"]/p[3]/text()")
_house_direction=html_2.xpath("//div[@class=\"zf-room\"]/p[4]/text()")
_subway = html_2.xpath("//div[@class=\"zf-room\"]/p[5]/text()")
_xiaoqu_1 = html_2.xpath("//div[@class=\"zf-room\"]/p[6]/a[1]/text()")
_xiaoqu_2 = html_2.xpath("//div[@class=\"zf-room\"]/p[6]/a[2]/text()")
_house_num = html_2.xpath("//div[@class=\"houseRecord\"]/span/text()")
#_other_all = html_2.xpath("//div[@class=\"content zf-content\"]/descendant::*/text()")
_tmp = []
_tmp.append(_price)
_tmp.append(_area)
_tmp.append(_house_type)
_tmp.append(_stair_level)
_tmp.append(_house_direction)
_tmp.append(_subway)
_tmp.append('-'.join(_xiaoqu_1 + _xiaoqu_2) )
_tmp.append(_house_num)
#_tmp.append(_other_all[0].strip())
print(_tmp)
dataArr.append(_tmp)
nextPage = html_1.xpath('//a[text()=\'下一页\']/@href')
if len(nextPage)>0 and int(pageNo) < endPage:
pageNo = re.findall(r'\d+', nextPage[0])[0]
return get_lianjia_rent_data(retry_count, pause, dataArr, pageNo=pageNo)
else:
return dataArr
except Exception as e:
print(e)
# 链家网数据
def craw_lianjia_rent_data_sandbox(retry_count = 3, pause = 1, dataArr=[],pageNo='/zufang/pg1', endPage=3):
for _ in range(retry_count):
time.sleep(pause)
try:
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'
HEADERS = {'user-agent': UA}
COOKIES="select_city=110000; all-lj=eae2e4b99b3cdec6662e8d55df89179a; lianjia_uuid=27657801-7728-4cdd-a8a6-2c91da633c92; _gat=1; _gat_past=1; _gat_global=1; _gat_new_global=1; _gat_dianpu_agent=1; _smt_uid=59a4cafa.20fe3268; _ga=GA1.2.402296747.1503972092; _gid=GA1.2.1448219226.1503972092; lianjia_ssid=242c6dac-12ad-49db-a047-9a32f0e6ff44"
COOKIES = dict(x.strip().split('=', 1) for x in COOKIES.split(';'))
print('cookie: ', COOKIES)
#request_1 = Request(CONFIG.LIANJIA_URL%(pageNo), headers=HEADERS)
request_1 = requests.get(CONFIG.LIANJIA_URL%(pageNo), headers=HEADERS, cookies=COOKIES)
#text_1 = urlopen(request_1, timeout=10).read()
text_1 = request_1.text
#print('text: ', text_1)
#text_1 = text_1.decode('utf-8')
#print('text: ', text_1)
html_1 = lxml.html.parse(StringIO(text_1))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import featuretools as ft
import pandas as pd
import pytest
from numpy import nan
from cardea.data_loader import EntitySetLoader
from cardea.problem_definition.predicting_diagnosis import DiagnosisPrediction
@pytest.fixture()
def diagnosis_prediction():
return DiagnosisPrediction("Z10")
@pytest.fixture()
def es_loader():
return EntitySetLoader()
@pytest.fixture()
def cutoff_times():
temp = pd.DataFrame({"instance_id": [10, 11, 12],
"time": ['9/22/2018 00:00', '9/21/2018 00:00', '10/4/2018 00:00'],
"label": [True, False, False]})
temp['time'] = pd.to_datetime(temp['time'])
return temp
@pytest.fixture()
def objects(es_loader):
encounter_df = pd.DataFrame({"identifier": [10, 11, 12],
"subject": [0, 1, 2],
"period": [120, 121, 122],
"length": [2, 1, 7],
"diagnosis": [1, 2, 3]})
encounter_diagnosis_df = pd.DataFrame({"object_id": [1, 2, 3],
"condition": [10, 11, 12]})
condition_df = pd.DataFrame({"identifier": [10, 11, 12],
"code": [1, 2, 3],
"subject": [10, 11, 12]})
cc_df = pd.DataFrame({"object_id": [1, 2, 3],
"coding": [100, 111, 112],
"subject": [10, 11, 12]})
coding_df = pd.DataFrame({"object_id": [100, 111, 112],
"code": ["Z10", "C12", "A10"]})
period_df = pd.DataFrame({"object_id": [120, 121, 122],
"start": ['9/22/2018 00:00', '9/21/2018 00:00', '10/4/2018 00:00'],
"end": ['9/22/2018 00:10', '9/21/2018 00:10', '10/4/2018 00:10']})
duration_df = pd.DataFrame({"object_id": [0, 2, 1, 7]})
patient_df = pd.DataFrame({"identifier": [0, 1, 2],
"gender": ['female', 'female', 'male'],
"birthDate": ['10/21/2000', '7/2/2000', '1/10/2000'],
"active": ['True', 'True', 'nan']})
encounter = es_loader.create_object(encounter_df, 'Encounter')
period = es_loader.create_object(period_df, 'Period')
patient = es_loader.create_object(patient_df, 'Patient')
duration = es_loader.create_object(duration_df, 'Duration')
encounter_diagnosis = es_loader.create_object(encounter_diagnosis_df, 'Encounter_Diagnosis')
condition = es_loader.create_object(condition_df, 'Condition')
cc = es_loader.create_object(cc_df, 'CodeableConcept')
coding = es_loader.create_object(coding_df, 'Coding')
objects = [encounter, period, patient, duration, encounter_diagnosis, condition, cc, coding]
return objects
@pytest.fixture()
def objects_fail(es_loader):
encounter_df = pd.DataFrame({"identifier": [10, 11, 12],
"subject": [0, 1, 2],
"period": [120, 121, 122]})
period_df = pd.DataFrame({"object_id": [120, 121, 122],
"start": ['9/18/2018 00:00', '9/19/2018 00:00', '9/20/2018 11:00'],
"end": ['9/20/2018 00:00', '9/20/2018 00:10', '9/27/2018 00:10']})
patient_df = pd.DataFrame({"identifier": [0, 1, 2],
"gender": ['female', 'female', 'male'],
"birthDate": ['10/21/2000', '7/2/2000', '1/10/2000'],
"active": ['True', 'True', 'nan']})
encounter = es_loader.create_object(encounter_df, 'Encounter')
period = es_loader.create_object(period_df, 'Period')
patient = es_loader.create_object(patient_df, 'Patient')
return [encounter, period, patient]
@pytest.fixture()
def objects_missing_generation_table(es_loader):
encounter_df = pd.DataFrame({"identifier": [10, 11, 12, 13, 14, 15],
"subject": [0, 1, 2, 0, 0, 0],
"length": [2, 1, 7, 0, 0, 0]})
duration_df = pd.DataFrame({"object_id": [0, 2, 1, 7]})
#
# Build a graph describing the layout of each station based on data
# from the MTA's elevator and escalator equipment file. We also
# incorporate an override file, since some of the MTA descriptions
# too difficult for this simple program to understand. Writes to
# stdout.
#
import argparse
import pandas as pd
import re
import sys
from utils import split_elevator_description
def load_equipment(master_file, with_inactive=False, with_inaccessible=False, with_escalators=False, with_elevators=True):
equipment = pd.read_csv(master_file)
import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import check_empty_nwp
from Fuzzy_clustering.version2.dataset_manager.common_utils import rescale_mean
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_2d_dense
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
class DatasetCreatorDense:
def __init__(self, projects_group, projects, data, path_nwp, nwp_model, nwp_resolution, data_variables, njobs=1,
test=False, dates=None):
self.projects = projects
self.is_for_test = test
self.projects_group = projects_group
self.data = data
self.path_nwp = path_nwp
self.nwp_model = nwp_model
self.nwp_resolution = nwp_resolution
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = njobs
self.variables = data_variables
self.logger = create_logger(logger_name=__name__, abs_path=self.path_nwp,
logger_path=f'log_{self.projects_group}.log', write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates are checked. Number of time samples is %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def correct_nwps(self, nwp, variables):
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if not var in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and not 'WS' in nwp.keys():
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
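# Derive wind speed and direction from the U/V components;
# 45.0 / arctan(1.0) equals 180 / pi, i.e. the radians-to-degrees factor.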
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def stack_by_sample(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables, predictions):
timestep = 60
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
preds = predictions[project['_id']]
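# Forecast horizon in minutes: one timestep beyond the last stored
# short-term prediction column for this project.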
hor = preds.columns[-1] + timestep
p_dates = [t + pd.DateOffset(minutes=hor)]
preds = preds.loc[t].to_frame().T
dates_pred = [t + pd.DateOffset(minutes=h) for h in preds.columns]
pred = pd.DataFrame(preds.values.ravel(), index=dates_pred, columns=[project['_id']])
data_temp = pd.concat([data[project['_id']].iloc[np.where(data.index < t)].to_frame(), pred])
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
try:
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1))].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2))].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data_temp.loc[(date - pd.DateOffset(hours=1)), project_id].values
inp['Obs_lag2'] = data_temp.loc[(date - pd.DateOffset(hours=2)), project_id].values
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
for project in projects:
if len(x_3d[project['_id']].shape) == 3:
x_3d[project['_id']] = x_3d[project['_id']][np.newaxis, :, :, :]
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
y = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='H')
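# p_dates: hours 24-47 after t for day_ahead projects, otherwise the next 24 hourly steps.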
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
y[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not self.is_for_test:
inp['Obs_lag1'] = inp['Obs_lag1'] + np.random.normal(0, 0.05) * inp['Obs_lag1']
inp['Obs_lag2'] = inp['Obs_lag2'] + np.random.normal(0, 0.05) * inp['Obs_lag2']
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
y[project_id] = pd.concat([y[project_id], pd.DataFrame(data.loc[date, project_id],
columns=['target'],
index=[date])])
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
if not inp.isnull().any(axis=1).values and not np.isnan(data.loc[date, project_id]):
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
y[project['_id']] = pd.concat(
[y[project['_id']], pd.DataFrame(data.loc[date, project['_id']],
columns=['target'], index=[date])])
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, y, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_rabbitmq(self, t, path_nwp, nwp_model, project, variables):
x = dict()
x_3d = dict()
nwps = project['nwp']
p_dates = pd.date_range(t, t + pd.DateOffset(days=3) - pd.DateOffset(hours=1), freq='H')
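# The RabbitMQ online mode builds inputs for a 72-hour horizon in hourly steps starting at t.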
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_rabbitmq(date, nwp, nwp_prev, nwp_next, project['static_data']['type'])
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def stack_daily_nwps_online(self, t, data, lats, longs, path_nwp, nwp_model, projects, variables):
x = dict()
x_3d = dict()
file_name = os.path.join(path_nwp, f"{nwp_model}_{t.strftime('%d%m%y')}.pickle")
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for project in projects:
if project['static_data']['horizon'] == 'day_ahead':
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
else:
p_dates = pd.date_range(t + pd.DateOffset(hours=1), t + pd.DateOffset(hours=24), freq='15min')
project_id = project['_id'] # It's the project name, the park's name
x[project_id] = pd.DataFrame()
x_3d[project_id] = np.array([])
areas = project['static_data']['areas']
if isinstance(areas, list):
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample(date, nwp, nwp_prev, nwp_next, lats[project_id],
longs[project_id], project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project_id] = pd.concat([x[project_id], inp])
x_3d[project_id] = stack_2d_dense(x_3d[project_id], inp_cnn, False)
except Exception:
continue
else:
for date in p_dates:
try:
date_nwp = date.round('H').strftime('%d%m%y%H%M')
nwp = nwps[date_nwp]
nwp = self.correct_nwps(nwp, variables)
date_nwp = pd.to_datetime(date_nwp, format='%d%m%y%H%M')
nwp_prev = nwps[(date_nwp - pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_next = nwps[(date_nwp + pd.DateOffset(hours=1)).strftime('%d%m%y%H%M')]
nwp_prev = self.correct_nwps(nwp_prev, variables)
nwp_next = self.correct_nwps(nwp_next, variables)
if check_empty_nwp(nwp, nwp_next, nwp_prev, variables):
inp, inp_cnn = self.create_sample_country(date, nwp, nwp_prev, nwp_next,
lats[project['_id']],
longs[project['_id']],
project['static_data']['type'])
if project['static_data']['horizon'] == 'short-term':
inp['Obs_lag1'] = data.loc[(date - pd.DateOffset(hours=1)), project_id]
inp['Obs_lag2'] = data.loc[(date - pd.DateOffset(hours=2)), project_id]
x[project['_id']] = pd.concat([x[project['_id']], inp])
x_3d[project['_id']] = stack_2d_dense(x_3d[project['_id']], inp_cnn, False)
except Exception:
continue
print(t.strftime('%d%m%y%H%M'), ' extracted')
return x, x_3d, t.strftime('%d%m%y%H%M')
def get_lats_longs(self):
lats = dict()
longs = dict()
nwp_found = False
for t in self.dates:  # try to load at least one NWP file to discover the grid coordinates
file_name = os.path.join(self.path_nwp, f"{self.nwp_model}_{t.strftime('%d%m%y')}.pickle")
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=48), freq='H').strftime(
'%d%m%y%H%M')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
if date in nwps:
nwp = nwps[date]
nwp_found = True
break
if nwp_found:
break
print(nwp_found)
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
if nwp['lat'].shape[0] == 0:
area_group = self.projects[0]['static_data']['area_group']
resolution = self.projects[0]['static_data']['NWP_resolution']
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2, resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2, resolution).reshape(-1, 1).T
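# For every project keep the row/column indices of the NWP grid points
# that fall inside the project's area(s).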
for project in self.projects:
areas = project['static_data']['areas'] # The final area is a 5x5 grid
project_id = project['_id']
lat, long = nwp['lat'], nwp['long']
if isinstance(areas, list):
# Is this guaranteed to be 5x5 ? I think yes, because of the resolution. TODO: VERIFY
lats[project_id] = np.where((lat[:, 0] >= areas[0][0]) & (lat[:, 0] <= areas[1][0]))[0]
longs[project_id] = np.where((long[0, :] >= areas[0][1]) & (long[0, :] <= areas[1][1]))[0]
else:
lats[project_id] = dict()
longs[project_id] = dict()
for area in sorted(areas.keys()):
lats[project_id][area] = np.where((lat[:, 0] >= areas[0][0]) & (lat[:, 0] <= areas[1][0]))[0]
longs[project_id][area] = np.where((long[0, :] >= areas[0][1]) & (long[0, :] <= areas[1][1]))[0]
return lats, longs
def make_dataset_res_short_term(self):
lats, longs = self.get_lats_longs()
predictions = dict()
for project in self.projects:
predictions[project['_id']] = joblib.load(os.path.join(project['static_data']['path_data']
, 'predictions_short_term.pickle'))
nwp = self.stack_by_sample(self.data.index[20], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions)
nwp_samples = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_by_sample)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables, predictions) for t in self.data.index[20:])
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
x_3d[project['_id']] = np.array([])
for nwp in nwp_samples:
for project in self.projects:
if project['_id'] in nwp[2].keys():
if nwp[2][project['_id']].shape[0] != 0:
x[project['_id']] = pd.concat([x[project['_id']], nwp[0][project['_id']]])
y[project['_id']] = pd.concat([y[project['_id']], nwp[1][project['_id']]])
x_3d[project['_id']] = stack_3d(x_3d[project['_id']], nwp[2][project['_id']])
self.logger.info('All Inputs stacked')
dataset_x_csv = 'dataset_X_test.csv'
dataset_y_csv = 'dataset_y_test.csv'
dataset_cnn_pickle = 'dataset_cnn_test.pickle'
for project in self.projects:
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x[project_id]
dataset_y = y[project_id]
if dataset_y.isna().any().values[0]:
dataset_x = dataset_x.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if len(x_3d[project_id].shape) > 1:
x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if dataset_x.isna().any().values[0]:
dataset_y = dataset_y.drop(dataset_x.index[np.where(dataset_x.isna())[0]])
if len(x_3d[project_id].shape) > 1:
x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_x.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
index = [d for d in dataset_x.index if d in dataset_y.index]
dataset_x = dataset_x.loc[index]
dataset_y = dataset_y.loc[index]
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
dataset_x.to_csv(os.path.join(data_path, dataset_x_csv))
dataset_y.to_csv(os.path.join(data_path, dataset_y_csv))
joblib.dump(x_3d[project_id], os.path.join(data_path, dataset_cnn_pickle))
self.logger.info('Datasets saved for project %s', project['_id'])
def make_dataset_res_rabbitmq(self):
project = self.projects[0]
nwp_daily = self.stack_daily_nwps_rabbitmq(self.dates[0], self.path_nwp, self.nwp_model, project,
self.variables)
x = nwp_daily[0][project['_id']]
x_3d = nwp_daily[1][project['_id']]
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
if os.path.exists(os.path.join(data_path, 'dataset_columns_order.pickle')):
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res_online(self):
project = self.projects[0]
lats, longs = self.get_lats_longs()
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps_online)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = pd.DataFrame()
y = pd.DataFrame()
x_3d = np.array([])
for nwp in nwp_daily:
if nwp[1][project['_id']].shape[0] != 0:
x = pd.concat([x, nwp[0][project['_id']]])
x_3d = stack_3d(x_3d, nwp[2][project['_id']])
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
return dataset_x, x_3d
def make_dataset_res(self):
lats, longs = self.get_lats_longs()
nwp = self.stack_daily_nwps(self.dates[4], self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables)
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(self.stack_daily_nwps)(t, self.data, lats, longs, self.path_nwp, self.nwp_model, self.projects,
self.variables) for t in self.dates)
x = dict()
y = dict()
x_3d = dict()
for project in self.projects:
x[project['_id']] = pd.DataFrame()
y[project['_id']] = pd.DataFrame()
x_3d[project['_id']] = np.array([])
for nwp in nwp_daily:
for project in self.projects:
if project['_id'] in nwp[2].keys():
if nwp[2][project['_id']].shape[0] != 0:
x[project['_id']] = pd.concat([x[project['_id']], nwp[0][project['_id']]])
y[project['_id']] = pd.concat([y[project['_id']], nwp[1][project['_id']]])
x_3d[project['_id']] = stack_3d(x_3d[project['_id']], nwp[2][project['_id']])
self.logger.info('All Inputs stacked')
dataset_x_csv = 'dataset_X_test.csv' if self.is_for_test else 'dataset_X.csv'
dataset_y_csv = 'dataset_y_test.csv' if self.is_for_test else 'dataset_y.csv'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
for project in self.projects:
project_id = project['_id']
data_path = project['static_data']['path_data']
dataset_x = x[project_id]
dataset_y = y[project_id]
if dataset_y.isna().any().values[0]:
dataset_x = dataset_x.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
if len(x_3d[project_id].shape) > 1:
x_3d[project_id] = np.delete(x_3d[project_id], np.where(dataset_y.isna())[0], axis=0)
dataset_y = dataset_y.drop(dataset_y.index[np.where(dataset_y.isna())[0]])
index = [d for d in dataset_x.index if d in dataset_y.index]
dataset_x = dataset_x.loc[index]
dataset_y = dataset_y.loc[index]
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
else: # create the right order of the columns
corr = []
for f in range(dataset_x.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_x.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_x.columns[ind]
dataset_x = dataset_x[columns]
dataset_x.to_csv(os.path.join(data_path, dataset_x_csv))
dataset_y.to_csv(os.path.join(data_path, dataset_y_csv))
joblib.dump(x_3d[project_id], os.path.join(data_path, dataset_cnn_pickle))
self.logger.info('Datasets saved for project %s', project['_id'])
def create_sample_country(self, date, nwp, nwp_prev, nwp_next, lats_all, longs_all, model_type):
inp = pd.DataFrame()
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
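# Split the rows into (start, nrows) tasks; only the first chunk reads the
# header row, the other chunks re-use its column names once the pool returns.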
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
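# Row i places the i-th default NA sentinel in column i, so every value
# in _NA_VALUES is exercised exactly once.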
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
        fh = StringIO(csv)
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
##########################################################################
# Copyright (c) 2017-2018 <NAME>. All rights reserved. #
# Use of this source code is governed by a BSD-style license that can be #
# found in the LICENSE file. #
##########################################################################
import os
from io import StringIO
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import colorlog
_log = colorlog.getLogger('emg_analyzer')
class Emg:
"""
Class to handle **E**\ lectro **M**\ yo **G**\ ram.
From *.emt* files.
"""
def __init__(self):
"""
Initialization of Emg object.
"""
self.name = None
self.header = None
self.data = None
def __eq__(self, other):
"""
:param other:
:return:
"""
return self.header == other.header and self.data == other.data
def parse(self, emt_file):
"""
Parse emt_file to fill this object.
:param emt_file: the file to parse
:type emt_file: file object
"""
self.name = os.path.splitext(os.path.basename(emt_file.name))[0]
self.header = EmgHeader()
self.header.parse(emt_file)
self.data = EmgData()
self.data.parse(emt_file, self.header.tracks_names)
if self.header.frames != self.data.frames:
raise RuntimeError("The number of Frames in header '{}' "
"does not match data frames '{}'.".format(self.header.frames,
self.data.frames))
def norm_by_track(self, dyn_cal=None):
"""
        Normalize each voltage record.
        Each record is normalized independently following the formula below.
        .. math::
            z_i = (x_i - min(x)) / (max(x) - min(x))
        where x = (x_1, ..., x_n) is a single track and z is the normalized track.
"""
new_emg = Emg()
new_header = self.header.copy()
new_data = self.data.norm_by_track(self.header.tracks_names, dyn_cal=dyn_cal)
new_emg.header = new_header
new_emg.data = new_data
return new_emg
def norm(self):
"""
Compute a new Emg where tracks are normalized (all together) following the formula below
.. math::
            z_i = (x_i - min(x)) / (max(x) - min(x))
        where x = (x_1, ..., x_n) and z is the matrix of normalized data.
:return: a new Emg
:rtype: :class:`Emg` object
"""
new_emg = Emg()
new_header = self.header.copy()
new_data = self.data.norm()
new_emg.header = new_header
new_emg.data = new_data
return new_emg
def group_by_track(self, emg_list):
merge = {}
emg_list.insert(0, self)
for emg in emg_list:
for track in emg.header.tracks_names:
if track in merge:
merge[track].append(emg)
else:
merge[track] = [emg]
merged_emg = []
for new_emg_name in merge:
new_emg = Emg()
new_emg.name = new_emg_name
new_emg.header = self.header.copy()
new_emg.header.tracks_nb = len(merge[new_emg_name])
emg_2_group = {emg.name: emg.data for emg in merge[new_emg_name]}
new_emg.data = EmgData.group_track(new_emg_name, emg_2_group)
new_emg.header.tracks_names = new_emg.data.tracks
new_emg.header.frames = new_emg.data.frames
merged_emg.append(new_emg)
return merged_emg
def describe(self):
"""
:return:
"""
return self.data.describe()
def select(self, rest_matrix, coef=1.5):
"""
        Select the data that exceed the per-muscle rest threshold (mean + coef * std).
        :param rest_matrix: rest statistics per muscle, with 'mean' and 'std' rows.
        :param float coef: factor applied to the rest standard deviation to set each threshold.
        :return: a new Emg restricted to the frames above threshold, and the thresholds used.
"""
new_emg = Emg()
new_header = self.header.copy()
new_data, thresholds = self.data.select(rest_matrix, coef=coef)
new_header.frames = new_data.frames
new_header.start_time = self.data.start_time
new_emg.header = new_header
new_emg.data = new_data
return new_emg, thresholds
def to_emt(self, file=None):
"""
Write the emg in .emt file format
:param file: Optional buffer to write to.
If None is provided the result is returned as a string.
:type file: StringIO-like or file-like object.
:returns: The emg formatted to *'.emt'* format
:rtype: file-like object or string
"""
buffer = file if file is not None else StringIO()
self.header.to_tsv(file=buffer)
self.data.to_tsv(file=buffer)
if file is None:
buffer = buffer.getvalue()
return buffer
def to_plot(self, out_dir=None, y_scale_auto=False):
"""
:param out_dir:
:return:
"""
figs_path = []
ymin = self.data.min
ymax = self.data.max
for track in self.data.tracks:
fig_name = "{}_{}.{}".format(self.name, track, 'png')
transtab = str.maketrans('/ :', '___')
fig_name = fig_name.translate(transtab)
_log.info("Compute figure: " + fig_name)
with plt.style.context('dark_background'):
plt.close('all')
fig, ax = plt.subplots()
width, heigth = fig.get_size_inches()
fig.set_size_inches([width * 2, heigth])
x = self.data['Time']
y = self.data[track]
ax.plot(x, y,
color='red',
linewidth=1,
label=self.name)
ax.set(xlabel='time (s)',
ylabel='voltage ({})'.format(self.header.unit),
title=track)
if not y_scale_auto:
ax.set_ylim([ymin, ymax])
ax.grid(color='darkgrey', linestyle='--', linewidth=1)
plt.legend()
fig_path = os.path.join(out_dir, fig_name)
fig.savefig(fig_path)
figs_path.append(fig_path)
del fig
return figs_path
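# Illustrative sketch only (not part of the original module API): a typical
# end-to-end use of Emg as described in the docstrings above -- parse an .emt
# file, normalize each track independently and write the result back to disk.
# The file names used below are hypothetical.
def _example_normalize_workflow(emt_path='recording.emt', out_path='recording_norm.emt'):
    emg = Emg()
    with open(emt_path) as emt_file:
        emg.parse(emt_file)
    normed = emg.norm_by_track()
    with open(out_path, 'w') as out:
        normed.to_emt(file=out)
    return normed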
class EmgHeader:
"""
Class to handle the header of an *.emt* file
"""
_template = """BTS ASCII format
Type: \t{type}
Measure unit: \t{unit}
Tracks: \t{tracks_nb}
Frequency: \t{freq}
Frames: \t{frames}
Start time: \t{start_time:.3f}
Frame\t Time\t{tracks_names}\t
"""
def __init__(self):
"""
Initialization of EmgHeader object
"""
self.type = None
self.unit = None
self.tracks_nb = None
self.freq = None
self.frames = None
self.start_time = None
self.tracks_names = None
def __eq__(self, other):
for attr, val in self.__dict__.items():
if getattr(other, attr) != val:
return False
return True
def copy(self):
"""
:return: a deep copy of the header
:rtype: a :class:`EmgHeader` object.
"""
new_header = EmgHeader()
for attr, value in self.__dict__.items():
setattr(new_header, attr, value)
return new_header
def parse(self, emt_file):
"""
Parse emt_file to fill this object
:param emt_file: the file to parse
:type emt_file: file object
"""
for line in emt_file:
if line.startswith('BTS'):
pass
elif line.startswith('Type:'):
self.type = line.split('\t')[1].strip()
elif line.startswith('Measure unit:'):
self.unit = line.split('\t')[1].strip()
elif line.startswith('Tracks:'):
self.tracks_nb = int(line.split('\t')[1].strip())
elif line.startswith('Frequency:'):
self.freq = line.split('\t')[1].strip()
elif line.startswith('Frames:'):
self.frames = int(line.split('\t')[1].strip())
elif line.startswith('Start time:'):
self.start_time = float(line.split('\t')[1].strip())
elif line.startswith(' Frame\t'):
columns = line.strip().split('\t')
columns = columns[2:]
self.tracks_names = [c.split(':')[1].split('~')[0].strip() for c in columns]
if self.tracks_names[0].startswith('Dev1/'):
self.tracks_names = [c.replace('Dev1/', '') for c in self.tracks_names]
break
else:
continue
assert all([v is not None for v in self.__dict__.values()]), \
"ERROR during parsing '{}': {}".format(emt_file.name,
', '.join([k for k, v in self.__dict__.items() if v is None]))
assert len(self.tracks_names) == self.tracks_nb,\
"ERROR during parsing '{}': tracks number '{}'" \
" does not match tracks: {}.".format(emt_file.name,
self.tracks_nb,
", ".join(self.tracks_names))
def to_tsv(self, file=None):
"""
Write this header in tsv according the *.emt* file format
:param file: Optional buffer to write to.
If None is provided the result is returned as a string.
:type file: StringIO-like or file-like object.
:returns: The header formatted into *'.emt'* format
:rtype: file-like object or string
"""
buffer = file if file is not None else StringIO()
fields = {k: v for k, v in self.__dict__.items()}
fields['tracks_names'] = '\t'.join(['Voltage:{}'.format(m) for m in self.tracks_names])
buffer.write(self._template.format(**fields))
if file is None:
buffer = buffer.getvalue()
return buffer
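# For reference, EmgHeader.to_tsv renders a header shaped like the sketch below
# (tab-separated in the real output); every value here is hypothetical and only
# illustrates the layout produced by the _template above:
#   BTS ASCII format
#   Type:          Emg tracks
#   Measure unit:  V
#   Tracks:        2
#   Frequency:     1000 Hz
#   Frames:        3
#   Start time:    0.000
#    Frame   Time   Voltage:biceps   Voltage:triceps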
class EmgData:
"""
Class to handle the data of an *.emt* file
"""
def __init__(self):
"""
Initialization of EmgData object.
"""
self.data = None
def __eq__(self, other):
if other.data.shape != self.data.shape:
return False
if all(other.data.columns == self.data.columns):
return np.isclose(self.data, other.data).all()
else:
return False
def parse(self, emt_file, tracks):
"""
Parse emt_file to fill this object.
:param emt_file: the file to parse
:type emt_file: file object
:param tracks: The list of the tracks to parse.
:type tracks: List of string
"""
columns = ['Frame', 'Time'] + tracks
self.data = pd.read_table(emt_file,
sep='\t',
names=columns,
header=None,
skip_blank_lines=True,
index_col=0,
usecols=list(range(len(columns)))
)
@property
def tracks(self):
"""
:return: The list of the tracks in this EMG.
:rtype: List of string
"""
if self.data.columns[0].upper() == "TIME":
return list(self.data.columns)[1:]
else:
            # this is probably the result of a concatenation:
            # time was removed because it no longer makes sense there
return list(self.data.columns)
@property
def frames(self):
"""
:return: The number of frames
:rtype: int
"""
return len(self.data)
@property
def max(self):
time, data = self._split_data()
return data.max().max()
@property
def min(self):
time, data = self._split_data()
return data.min().min()
@property
def start_time(self):
return self.data['Time'][0]
def _split_data(self):
"""
        :return: the data split into 2 DataFrames:
                 the first one contains the time, the second one the tracks.
:rtype: tuple of 2 :class:`pd.DataFrame` object
"""
if self.data.columns[0].upper() == 'TIME':
time = self.data.iloc[:, 0:1]
data = self.data.iloc[:, 1:]
return time, data
else:
raise RuntimeError("The first column is not Time: abort splitting")
@staticmethod
def _new_data(data):
"""
:param data: DataFrame to put in EmgData
:type data: :class:`pd.DataFrame` object
:return: new EmgData
:rtype: :class:`pd.DataFrame` object
"""
new_data = EmgData()
new_data.data = data
return new_data
def __getitem__(self, track_name):
"""
:param str track_name:
:return: return all frames corresponding to the track track_name
        :rtype: :class:`pandas.Series` object
"""
return self.data[track_name]
def get_frames(self, start, stop):
"""
:param int start:
:param int stop:
        :return: the frames between start and stop, inclusive.
"""
return self.data.loc[start:stop]
def norm_by_track(self, tracks_names, dyn_cal=None):
"""
Compute a new EmgData where each track is normalized
independently following the formula below
.. math::
            z_i = (x_i - min(x)) / (max(x) - min(x))
        where x = (x_1, ..., x_n) is a single track and z is the normalized track.
        :param tracks_names: The names of the tracks to normalize.
        :type tracks_names: list of string. Each string must match a data column.
:param dyn_cal: The min and max for each muscle to normalize
The data Frame must have the following structure
muscle1 muscle2 muscle3 ...
min 0 1.1 ...
max 10.1 12.3 ...
:type dyn_cal: :class:`pandas.DataFrame` object
:return: a new EmgData
:rtype: :class:`EmgData` object
"""
time, data = self._split_data()
for col in tracks_names:
track = data[col]
if dyn_cal is not None:
v_min = dyn_cal[col]['min']
v_max = dyn_cal[col]['max']
else:
v_min = track.min()
v_max = track.max()
_log.debug("vmin = " + str(v_min))
_log.debug("vmax = " + str(v_max))
track -= v_min # do it in place on data frame
track /= (v_max - v_min)
data = data.round(decimals=3)
data = pd.concat([time, data], axis=1)
return self._new_data(data)
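    # Worked example for norm_by_track (hypothetical numbers): if a track holds
    # x = [2.0, 4.0, 10.0], then min(x) = 2.0, max(x) = 10.0 and the normalized
    # track is [(2-2)/8, (4-2)/8, (10-2)/8] = [0.0, 0.25, 1.0].
    # When dyn_cal is given, its 'min'/'max' rows replace min(x)/max(x) for each
    # muscle column instead of the values observed in the track itself.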
def norm(self, v_min=None, v_max=None):
"""
Compute a new EmgData where tracks are normalized following the formula below
.. math::
            z_i = (x_i - min(x)) / (max(x) - min(x))
        where x = (x_1, ..., x_n) and z is the matrix of normalized data.
:param float v_min: The min value to use to normalize, if None use the min of the matrix.
:param float v_max: The max value to use to normalize, if None use the max of the matrix.
:return: a new EmgData
:rtype: :class:`EmgData` object
"""
time, data = self._split_data()
if v_min is None:
v_min = data.min().min()
_log.debug("v_min = " + str(v_min))
data -= v_min
if v_max is None:
v_max = data.max().max()
_log.debug("v_max = " + str(v_max))
data /= v_max
data = data.round(decimals=3)
data = pd.concat([time, data], axis=1)
return self._new_data(data)
def describe(self):
"""
        :return: basic statistics describing each column except time.
        :rtype: :class:`pandas.DataFrame` object
"""
return self.data.iloc[:, 1:].describe()
def select(self, rest_matrix, coef=1.5):
"""
        :param rest_matrix: rest statistics per muscle, with 'mean' and 'std' rows.
        :param float coef: factor applied to the rest standard deviation to set each threshold.
        :return: the data above threshold and the threshold used for each muscle.
"""
# split cols
cols = self.data.columns
filtered_cols = []
thresholds = {}
for col in cols[1:]:
# The first col should be Time
c = self.data[col]
threshold = rest_matrix[col]['mean'] + (rest_matrix[col]['std'] * coef)
thresholds[col] = threshold
s = c[c > threshold]
filtered_cols.append(s)
new_cols = pd.concat(filtered_cols, axis=1)
sel_time = self.data['Time']
new_df = pd.concat([sel_time, new_cols], axis=1)
return self._new_data(new_df), thresholds
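    # Worked example for select (hypothetical numbers): with a rest mean of 0.10,
    # a rest std of 0.02 and coef=1.5, the threshold for that muscle is
    # 0.10 + 0.02 * 1.5 = 0.13; only frames strictly above 0.13 are kept, the
    # filtered-out frames appear as NaN after the concat with the Time column.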
def to_tsv(self, file=None, header=False):
"""
Write this data in tsv according the *.emt* file format
:param file: Optional buffer to write to.
If None is provided the result is returned as a string.
:type file: StringIO-like or file-like object.
:param header: boolean or list of string, default False
Write out the column names.
If a list of strings is given it is assumed
to be aliases for the column names.
:type header: boolean
        :returns: The data formatted into *'.emt'* format
:rtype: file-like object or string
"""
buffer = file if file is not None else StringIO()
self.data.to_csv(path_or_buf=buffer,
header=header,
sep='\t',
float_format='%.3f',
na_rep='NaN')
if file is None:
buffer = buffer.getvalue()
return buffer
@staticmethod
def group_track(track, emg_2_group):
"""
        :param track: the name of the track to extract from each experiment.
        :param emg_2_group: dict {'name': EmgData} mapping experiment names to their data.
        :return: a new EmgData whose columns are the track 'track'
                 taken from each entry of emg_2_group ::
exp1: Frame Time A B
0 0 1 10
1 1 2 20
2 2 3 30
exp2: Frame Time A B
0 0 1.2 10.2
1 1 2.2 20.2
2 2 3.2 30.2
A: Frame Time exp1 exp2
0 0 1 1.2
1 1 2 2.2
2 2 3 3.2
:rtype: :class:`EmgData` object
"""
one_emg = next(iter(emg_2_group.values()))
data = one_emg.data['Time']
series = []
for name, emg in emg_2_group.items():
            # s is a pandas.Series
s = emg.data[track]
s.name = name
series.append(s)
series.insert(0, data)
data = pd.concat(series, axis=1)
return EmgData._new_data(data)
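# Illustrative sketch only (not part of the original module API): grouping
# several recordings by track, mirroring the example in the group_track
# docstring above. The paths 'exp1.emt' and 'exp2.emt' are made-up names.
def _example_group_by_track(paths=('exp1.emt', 'exp2.emt')):
    emgs = []
    for path in paths:
        emg = Emg()
        with open(path) as emt_file:
            emg.parse(emt_file)
        emgs.append(emg)
    first, rest = emgs[0], emgs[1:]
    # returns one Emg per track whose columns are the contributing experiments
    return first.group_by_track(rest)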
def desc_summary(sel_summary, index_names=('Experiment', 'Muscle')):
"""
    :param sel_summary: the paths of the selection summary files to aggregate.
    :param index_names: the names of the two levels of the resulting MultiIndex.
:return:
"""
summaries = []
for summary_path in sel_summary:
with open(summary_path) as sel_file:
header = next(sel_file)
if not header.startswith('# Summary of activities for condition:'):
raise RuntimeError("{} is not a selection summary file".format(summary_path))
mvt = os.path.splitext(os.path.basename(summary_path))[0]
mvt = mvt.replace('FiltradoRectificado_norm', '')
summary = pd.read_table(sel_file, comment='#', index_col=0)
            mi = pd.MultiIndex.from_tuples([(mvt, muscle) for muscle in summary.index], names=index_names)
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df), expected)
# raises as assertion error if no warning occurs, same thing for below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df), expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
                          (3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
                          ['Bob', 1, 200]], columns=['name', 'count', 'sum'])
    tm.assert_frame_equal(result, expected)
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
def test_notnull():
assert (compute(nt.name.notnull(), ndf) == ndf.name.notnull()).all()
def test_isnan():
assert (compute(nt.amount.isnan(), ndf) == ndf.amount.isnull()).all()
@pytest.mark.parametrize('keys', [[1], [2, 3]])
def test_isin(keys):
expr = t[t.id.isin(keys)]
result = compute(expr, df)
expected = df.loc[df.id.isin(keys)]
tm.assert_frame_equal(result, expected)
def test_nunique_table():
expr = t.nunique()
result = compute(expr, df)
assert result == len(df.drop_duplicates())
def test_str_concat():
    a = Series(('a', 'b', 'c'))
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
        result = IntervalIndex(index, copy=False)
#####################################################################
####### Dash Plotly with Bootstrap Components #########
#####################################################################
import os
import pandas as pd
import numpy as np
from datetime import datetime
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import app
import plotly.graph_objects as go
import plotly.express as px
from cleaning_datas_docks import cargo, last_file_cargo, vessels, last_file_vessels, seacode, config
#Plot
years = vessels.groupby("date")[["counter"]].sum().fillna(0)[1:].index
vessels_number = go.Figure()
vessels_number.add_trace(go.Bar(x=years,
y=vessels[vessels["cal_place_code"]=="FRFOS"].groupby("date")[["counter"]].sum().fillna(0)["counter"],
name='FOS',
visible=True,
marker_color='#4b6584'
))
vessels_number.add_trace(go.Bar(x=years,
y=vessels[vessels["cal_place_code"]=="FRMRS"].groupby("date")[["counter"]].sum().fillna(0)["counter"],
name='MRS',
visible=True,
marker_color='#2d98da'
))
vessels_number.add_trace(go.Bar(x=years,
y=vessels[vessels["cal_place_code"]=="FRFOS"].groupby("date")[["counter"]].sum().fillna(0).diff()[1:]["counter"],
name='FOS',
visible=False,
marker_color='#4b6584'
))
vessels_number.add_trace(go.Bar(x=years,
y=vessels[vessels["cal_place_code"]=="FRMRS"].groupby("date")[["counter"]].sum().fillna(0).diff()[1:]["counter"],
name='MRS',
visible=False,
marker_color='#2d98da'
))
vessels_number.update_layout(
# highlight from today to last forecast
shapes=[
dict(
type="rect",
xref="x",
yref="paper",
x0=pd.Timestamp.today().strftime("%Y-%m-16"),
y0=0,
x1=years.max()+pd.Timedelta("30 days"),
y1=1,
fillcolor="#fd9644",
opacity=0.5,
layer="below",
line_width=0,
),
],
#Graphs settings
margin={"b":0},
height=500,
template="plotly_white",
title='Evolution of the number of ships by months in the port of Marseille/Fos',
yaxis=dict(
title='Number of vessels',
),
barmode='group',
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0.5)',
bordercolor='rgba(255, 255, 255, 0)'
),
)
# Add range slider
vessels_number.update_layout(
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(count=5,
label="5y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date"
)
)
#Add buttons
vessels_number.update_layout(
updatemenus=[
dict(
type = "buttons",
direction = "left",
buttons=list([
dict(
args=["barmode", "group"],
label="Groups",
method="relayout"
),
dict(
args=["barmode", "stack"],
label="Stacks",
method="relayout"
)
]),
pad={"r": 0, "t": 0},
showactive=True,
x=0.5,
xanchor="left",
y=1,
yanchor="bottom"
),
dict(
type="buttons",
direction="left",
pad={"r": 0, "t": 0},
showactive=True,
x=0.48,
xanchor="right",
y=1,
yanchor="bottom",
buttons=list([
dict(label="None",
method="update",
args=[{"visible": [True, True, False, False]}]),
dict(label="Diff",
method="update",
args=[{"visible": [False, False, True, True]}]),
]),
),
],
)
#Plot VESSELS numbers in months
vessels_trend=vessels[(vessels["cal_place_code"]=="FRMRS") | (vessels["cal_place_code"]=="FRFOS") ].groupby(["date","cal_place_code"], as_index=False)[["counter"]].sum().fillna(0)
#Keep if month < current month
vessels_trend=vessels_trend[(vessels_trend.date.dt.year < pd.Timestamp.today().year) | (vessels_trend.date.dt.month < pd.Timestamp.today().month)]
#Put months in full letters for a nice plotting
vessels_trend["monthofyear"]=vessels_trend.date.dt.month_name()
fig_months = go.Figure()
i=0
ls_buttons=[]
for elem in ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']:
for dock in vessels.cal_place_code.unique():
vessels_trend_month=vessels_trend[(vessels_trend.monthofyear==elem) & (vessels_trend.cal_place_code==dock)]
fig_months.add_trace(go.Bar(
x=vessels_trend_month.date.dt.year,
y=vessels_trend_month.counter,
text=round((vessels_trend_month.counter.diff()/vessels_trend_month.counter.shift())*100,2).astype(str)+"%",
textposition="outside",
name=dock,
hoverinfo='skip',
marker_color=("#2d98da" if dock=="FRMRS" else "#4b6584"),
visible=(True if elem == "January" else False),
))
fig_months.add_trace(go.Scatter(
x=vessels_trend_month.date.dt.year,
y=vessels_trend_month.counter,
name=dock,
hoverinfo="skip",
visible=(True if elem == "January" else False),
line=dict(color=("#45aaf2" if dock=="FRMRS" else "#778ca3"), dash="dash")
))
#Prepare buttons layout
ls_visible=pd.Series([False]*12*4)
ls_visible.update(pd.Series([True]*4, index=[i,i+1,i+2,i+3]))
ls_buttons.append(
dict(label=elem, method="update",
args=[{"visible": ls_visible}]
),
)
i=i+4
fig_months.update_layout(
updatemenus=[
dict(
type="buttons",
direction="right",
active=0,
x=1.1,
y=1,
xanchor="right",
yanchor="bottom",
buttons=list(ls_buttons),
),
],
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(count=5,
label="5y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date",
),
title="Vessels evolution by months",
yaxis=dict(title='Number of vessels'),
#showlegend=False,
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0.5)',
bordercolor='rgba(255, 255, 255, 0)'
),
template="plotly_white",
barmode='group',
)
#Plot expeditions
fig_exp = go.Figure()
for i in range(0,2):
if i==0:
visibility=False
else:
visibility=True
fig_exp.add_trace(go.Bar(x=cargo[cargo.export==i].groupby("date").sum().index,
y=cargo[cargo.export==i].groupby("date").sum().FOS,
name='FOS',
visible=visibility,
marker_color='#4b6584'
))
fig_exp.add_trace(go.Bar(x=cargo[cargo.export==i].groupby("date").sum().index,
y=cargo[cargo.export==i].groupby("date").sum().MRS,
name='MRS',
visible=visibility,
marker_color='#2d98da'
))
fig_exp.update_layout(
height=500,
margin={"b":0},
template="plotly_white",
title='Number of expeditions imports-exports at Marseille/Fos docks',
yaxis=dict(
title='Quantities',
),
barmode='group',
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0.5)',
bordercolor='rgba(255, 255, 255, 0)'
),
)
# Add buttons
fig_exp.update_layout(
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(count=5,
label="5y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date"
)
)
fig_exp.update_layout(
updatemenus=[
dict(
type = "buttons",
direction = "left",
buttons=list([
dict(
args=["barmode", "group"],
label="Groups",
method="relayout"
),
dict(
args=["barmode", "stack"],
label="Stacks",
method="relayout"
)
]),
pad={"r": 0, "t": 0},
showactive=True,
x=0.5,
xanchor="left",
y=1,
yanchor="bottom"
),
dict(
type="buttons",
direction="left",
pad={"r": 0, "t": 0},
showactive=True,
x=0.48,
xanchor="right",
y=1,
yanchor="bottom",
buttons=list([
dict(label="Exports",
method="update",
args=[{"visible": [False, False, True, True]}]),
dict(label="Imports",
method="update",
args=[{"visible": [True, True, False, False]}]),
]),
),
],
)
#Plot cargo import qte in months
cargo_trend=cargo.groupby(["date","export"], as_index=False).sum()
#Put months in full letters for a nice plotting
cargo_trend["monthofyear"]=cargo_trend.date.dt.month_name()
fig_cargo_months_imp = go.Figure()
i=0
ls_buttons=[]
for elem in ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']:
for dock in ["MRS","FOS"]:
cargo_trend_month=cargo_trend[(cargo_trend.monthofyear==elem) & (cargo_trend.export==0)]
fig_cargo_months_imp.add_trace(go.Bar(
x=cargo_trend_month.date.dt.year,
y=cargo_trend_month[dock],
text=round((cargo_trend_month[dock].diff()/cargo_trend_month[dock].shift())*100,2).astype(str)+"%",
textposition="outside",
name=dock,
hoverinfo='skip',
marker_color=("#2d98da" if dock=="MRS" else "#4b6584"),
visible=(True if elem == "January" else False),
))
fig_cargo_months_imp.add_trace(go.Scatter(
x=cargo_trend_month.date.dt.year,
y=cargo_trend_month[dock],
name=dock,
hoverinfo="skip",
visible=(True if elem == "January" else False),
line=dict(color=("#45aaf2" if dock=="MRS" else "#778ca3"), dash="dash")
))
#Prepare buttons layout
ls_visible=pd.Series([False]*12*4)
ls_visible.update(pd.Series([True]*4, index=[i,i+1,i+2,i+3]))
ls_buttons.append(
dict(label=elem, method="update",
args=[{"visible": ls_visible}]
),
)
i=i+4
fig_cargo_months_imp.update_layout(
updatemenus=[
dict(
type="buttons",
direction="right",
active=0,
x=1.1,
y=1,
xanchor="right",
yanchor="bottom",
buttons=list(ls_buttons),
),
],
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(count=5,
label="5y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date",
),
title="Cargo import evolution by months",
yaxis=dict(title='Quantity of goods imported'),
#showlegend=False,
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0.5)',
bordercolor='rgba(255, 255, 255, 0)'
),
template="plotly_white",
barmode='group',
)
#Plot cargo exports qte in months
cargo_trend=cargo.groupby(["date","export"], as_index=False).sum()
#Put months in full letters for a nice plotting
cargo_trend["monthofyear"]=cargo_trend.date.dt.month_name()
fig_cargo_months_exp = go.Figure()
i=0
ls_buttons=[]
for elem in ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']:
for dock in ["MRS","FOS"]:
cargo_trend_month=cargo_trend[(cargo_trend.monthofyear==elem) & (cargo_trend.export==1)]
fig_cargo_months_exp.add_trace(go.Bar(
x=cargo_trend_month.date.dt.year,
y=cargo_trend_month[dock],
text=round((cargo_trend_month[dock].diff()/cargo_trend_month[dock].shift())*100,2).astype(str)+"%",
textposition="outside",
name=dock,
hoverinfo='skip',
marker_color=("#2d98da" if dock=="MRS" else "#4b6584"),
visible=(True if elem == "January" else False),
))
fig_cargo_months_exp.add_trace(go.Scatter(
x=cargo_trend_month.date.dt.year,
y=cargo_trend_month[dock],
name=dock,
hoverinfo="skip",
visible=(True if elem == "January" else False),
line=dict(color=("#45aaf2" if dock=="MRS" else "#778ca3"), dash="dash")
))
#Prepare buttons layout
ls_visible=pd.Series([False]*12*4)
ls_visible.update(pd.Series([True]*4, index=[i,i+1,i+2,i+3]))
ls_buttons.append(
dict(label=elem, method="update",
args=[{"visible": ls_visible}]
),
)
i=i+4
fig_cargo_months_exp.update_layout(
updatemenus=[
dict(
type="buttons",
direction="right",
active=0,
x=1.1,
y=1,
xanchor="right",
yanchor="bottom",
buttons=list(ls_buttons),
),
],
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=6,
label="6m",
step="month",
stepmode="backward"),
dict(count=1,
label="YTD",
step="year",
stepmode="todate"),
dict(count=1,
label="1y",
step="year",
stepmode="backward"),
dict(count=5,
label="5y",
step="year",
stepmode="backward"),
dict(step="all")
])
),
type="date",
),
title="Cargo export evolution by months",
yaxis=dict(title='Quantity of goods exported'),
#showlegend=False,
legend=dict(
x=0,
y=1.0,
bgcolor='rgba(255, 255, 255, 0.5)',
bordercolor='rgba(255, 255, 255, 0)'
),
template="plotly_white",
barmode='group',
)
colors_palet={'ITALY':"#fad390", 'TURKEY':"#f8c291", 'SPAIN':"#ff6b81", 'FRANCE':"#82ccdd", 'SLOVENIA':"#b8e994", 'MOROCCO':"#f6b93b",
'PORTUGAL':"#e55039", 'TUNISIA':"#4a69bd", 'ALGERIA':"#78e08f", 'UNITED STATES OF AMERICA':"#60a3bc",
'ISRAEL':"#747d8c", 'MALTA':"#70a1ff", 'GREECE':"#786fa6", 'BENIN':"#D980FA", 'JORDAN':"#B53471", 'EGYPT':"#6F1E51", 'SENEGAL':"#C4E538",
'ROMANIA':"#fa983a", 'NIGERIA':"#eb2f06", 'BELGIUM':"#1e3799", 'LIBYA':"#3c6382", 'NETHERLANDS':"#38ada9", 'CANADA':"#e58e26",
"CÔTE D'IVOIRE":"#b71540", 'GHANA':"#0c2461", 'GIBRALTAR':"#0a3d62", 'SINGAPORE':"#079992", 'GERMANY':"#fad390",
'FRENCH GUIANA':"#f8c291"}
#Fig last pos
#Link informations previous and next port
vessels=vessels.merge(seacode, left_on="cal_last_place_code", right_on="CODE", how="left").rename(columns={"Country":"country_last", "Port":"port_last", "latitude":"latitude_last", "longitude":"longitude_last"})
vessels=vessels.merge(seacode, left_on="cal_next_place_code", right_on="CODE", how="left").rename(columns={"Country":"country_next", "Port":"port_next", "latitude":"latitude_next", "longitude":"longitude_next"})
vessels=vessels.rename(columns={"CODE_x":"CODE_last", "CODE_y":"CODE_next"})
#Groups for plot
vessels_groups=vessels.groupby([vessels.date.dt.year, vessels.date.dt.month, "country_last","port_last"], as_index=False).agg({'counter': 'sum', 'cal_diff': 'mean', 'date':'max'})
#Remove if obs < 2018 and if date < today
vessels_groups=vessels_groups[(vessels_groups.date > "2018-01-01") & (vessels_groups.date.dt.date <= pd.Timestamp.today())]
#Manipulation to get full horizontal bar chart
vessels_groups=vessels_groups.merge(vessels_groups.groupby("date", as_index=False)[["date","counter"]].sum(), left_on="date", right_on="date", how="left").rename(columns={"counter_x":"counter","counter_y":"sum_day"})
vessels_groups["counter_rel"]=vessels_groups.counter/vessels_groups.sum_day
fig_last_pos = px.bar(vessels_groups, x="counter_rel", color_discrete_map=colors_palet, y="date", color="country_last", title="Previous position of the ship", template="plotly_white", orientation='h')
fig_last_pos.update_layout(
height=550,
yaxis=dict(
title='date',
),
xaxis=dict(
title='distribution',
),
)
#Fig next pos
#Groups for plot
vessels_groups=vessels.groupby([vessels.date.dt.year, vessels.date.dt.month, "country_next","port_next"], as_index=False).agg({'counter': 'sum', 'cal_diff': 'mean', 'date':'max'})
#Remove if obs < 2018 and if date < today
vessels_groups=vessels_groups[(vessels_groups.date > "2018-01-01") & (vessels_groups.date.dt.date <= pd.Timestamp.today())]
# This file is part of the
# Garpar Project (https://github.com/quatrope/garpar).
# Copyright (c) 2021, 2022, <NAME>, <NAME> and QuatroPe
# License: MIT
# Full Text: https://github.com/quatrope/garpar/blob/master/LICENSE
# =============================================================================
# IMPORTS
# =============================================================================
from numpy import exp
from garpar.optimize import BlackLitterman, Markowitz, OptimizerABC
from garpar.optimize import mean_historical_return, sample_covariance
from garpar.core import Portfolio
import pandas as pd
import pandas.testing as pdt
import pytest
# =============================================================================
# TESTS WRAPPER FUNCTIONS
# =============================================================================
def test_mean_historical_return():
pf = Portfolio.from_dfkws(
df=pd.DataFrame(
{
"stock0": [1.11, 1.12, 1.10, 1.13, 1.18],
"stock1": [10.10, 10.32, 10.89, 10.93, 11.05],
},
),
entropy=0.5,
window_size=5,
)
result = mean_historical_return(pf)
expected = pd.Series({"stock0": 46.121466, "stock1": 287.122362})
expected.index.name = "Stocks"
    pdt.assert_series_equal(result, expected)
import pandas as pd
from scipy.io.arff import loadarff
def data_albrecht():
raw_data = loadarff("../data_experiment/classic/albrecht.arff")
df_data = pd.DataFrame(raw_data[0])
new_alb = df_data.drop(columns=['FPAdj', 'RawFPcounts', 'AdjFP'])
return new_alb
def data_china():
raw_data = loadarff("../data_experiment/classic/china.arff")
df_data = pd.DataFrame(raw_data[0])
new_chn = df_data.drop(columns=['ID', 'AFP', 'Added', 'Changed', 'Deleted', 'PDR_AFP', 'PDR_UFP', 'NPDR_AFP'
, 'NPDU_UFP', 'Dev.Type', 'Duration', 'N_effort'])
return new_chn
def data_desharnais():
raw_data = loadarff("../data_experiment/classic/desharnais.arff")
df_data = pd.DataFrame(raw_data[0])
new_desh = df_data.drop(index=[37, 43, 65, 74],
columns=['Project', 'YearEnd', 'Envergure', 'PointsNonAjust', 'Language'])
columnsTitles = ['TeamExp', 'ManagerExp', 'Length', 'Transactions', 'Entities', 'PointsAdjust', 'Effort']
new_desh = new_desh.reindex(columns=columnsTitles)
new_desh = new_desh.drop(columns=['Length'])
return new_desh
def data_finnish():
raw_data = loadarff("../data_experiment/classic/finnish.arff")
df_data = pd.DataFrame(raw_data[0])
new_finn = df_data.drop(columns=['ID'])
columnsTitles = ['hw', 'at', 'FP', 'co', 'prod', 'lnsize', 'lneff', 'dev.eff.hrs.']
new_finn = new_finn.reindex(columns=columnsTitles)
new_finn = new_finn.drop(columns=['prod', 'lnsize', 'lneff'])
return new_finn
def data_isbsg10():
raw_data = [
[1, 1, 1, 1, 1, 1, 1, 225, 1, 1, 1, 1856],
[1, 1, 1, 1, 1, 2, 1, 599, 2, 1, 2, 10083],
[1, 1, 1, 2, 1, 2, 1, 333, 2, 1, 3, 5208],
[1, 1, 2, 3, 2, 3, 1, 748, 2, 2, 3, 1518],
[1, 1, 1, 1, 1, 4, 1, 158, 1, 1, 4, 3376],
[1, 1, 1, 1, 1, 2, 1, 427, 2, 1, 3, 5170],
[2, 2, 3, 4, 3, 5, 1, 461, 2, 3, 4, 12149],
[1, 1, 4, 3, 2, 3, 1, 257, 1, 2, 3, 452],
[1, 1, 1, 2, 3, 6, 1, 115, 1, 1, 4, 441],
[1, 1, 5, 3, 2, 3, 1, 116, 1, 4, 4, 112],
[1, 1, 1, 2, 1, 7, 1, 323, 2, 1, 3, 1530],
[1, 1, 1, 2, 1, 1, 1, 134, 1, 1, 3, 1807],
[1, 1, 1, 2, 1, 14, 1, 292, 1, 1, 3, 1087],
[2, 2, 4, 4, 1, 8, 1, 399, 2, 3, 3, 7037],
[1, 1, 1, 1, 1, 2, 1, 44, 3, 1, 4, 784],
[1, 1, 1, 2, 1, 9, 1, 298, 1, 1, 4, 3268],
[1, 1, 1, 2, 1, 2, 1, 66, 3, 1, 3, 800],
[1, 1, 6, 3, 2, 3, 1, 243, 1, 2, 4, 257],
[1, 1, 1, 4, 1, 10, 1, 1105, 4, 1, 5, 14453],
[1, 1, 4, 3, 2, 3, 1, 679, 2, 4, 4, 326],
[2, 2, 7, 5, 1, 4, 1, 303, 2, 3, 4, 8490],
[1, 1, 1, 2, 1, 1, 1, 147, 1, 1, 3, 672],
[1, 1, 7, 3, 2, 3, 1, 143, 1, 2, 3, 98],
[1, 1, 1, 2, 1, 11, 1, 614, 2, 1, 4, 3280],
[2, 2, 7, 4, 3, 5, 1, 183, 1, 3, 4, 7327],
[1, 1, 8, 3, 2, 3, 1, 138, 1, 2, 4, 87],
[1, 1, 1, 2, 3, 12, 1, 129, 1, 1, 3, 1261],
[1, 1, 1, 2, 1, 2, 1, 205, 1, 1, 3, 3272],
[1, 1, 1, 2, 1, 1, 1, 471, 2, 1, 3, 1464],
[1, 1, 1, 5, 1, 4, 1, 97, 3, 1, 3, 1273],
[1, 1, 3, 3, 2, 3, 1, 1371, 4, 2, 3, 2274],
[1, 1, 1, 4, 1, 2, 1, 291, 1, 1, 4, 1772],
[1, 1, 9, 3, 2, 3, 1, 995, 2, 2, 4, 614],
[1, 2, 4, 2, 3, 6, 2, 211, 1, 3, 4, 1021],
[2, 2, 10, 2, 3, 13, 2, 192, 1, 3, 4, 1806],
[2, 2, 10, 2, 3, 13, 2, 98, 3, 3, 4, 921],
[2, 2, 7, 4, 1, 14, 1, 112, 1, 3, 4, 2134]
]
df_isbsg10 = pd.DataFrame(raw_data, columns=['Data_Quality','UFP','IS','DP','LT','PPL','CA','FS','RS',
'Recording_Method','FPS','Effort'])
return df_isbsg10
def data_kemerer():
raw_data = loadarff("../data_experiment/classic/kemerer.arff")
    df_data = pd.DataFrame(raw_data[0])
"""
Collection of functions to prepare the master curve for the identification
of the Prony series parameters. Methods are provided to shift the raw
measurement data into a master curve and remove measurement outliers through
smoothing of the master curve.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
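# Illustrative workflow for this module (a sketch, not part of the original
# code): assuming df_raw has been prepared by the package's load routines and
# carries the 'Set', 'T_round' and 'f_set' columns plus the storage or
# relaxation modulus used below, the shift factors for a master curve at a
# 20 degC reference temperature could be obtained roughly as
#
#     df_aT = get_aT(df_raw, RefT=20)
#
# where df_aT holds the temperatures 'T' and decadic shift factors 'log_aT'
# documented in get_aT below.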
"""
--------------------------------------------------------------------------------
Methods to shift raw data into master curve based on shift factors
--------------------------------------------------------------------------------
"""
def pwr_y(x, a, b, e):
"""
Calculate the Power Law relation with a deviation term.
Parameters
----------
x : numeric
Input to Power Law relation.
a : numeric
Constant.
b : numeric
Exponent.
e : numeric
Deviation term.
Returns
-------
numeric
Output of Power Law relation.
Notes
-----
Power Law relation: :math:`y = a x^b + e`
"""
return a*x**b+e
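# Worked example (illustrative, not part of the original module): with a=3,
# b=2, e=1 the relation gives y = 3 * 2**2 + 1 = 13 at x = 2, i.e.
# pwr_y(2.0, 3.0, 2.0, 1.0) returns 13.0.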
def pwr_x(y, a, b, e):
r"""
Calculate the inverse Power Law relation with a deviation term.
Parameters
----------
y : numeric
Output of Power Law relation.
a : numeric
Constant.
b : numeric
Exponent.
e : numeric
Deviation term.
Returns
-------
numeric
Input to Power Law relation.
Notes
-----
Inverse Power Law relation: :math:`x=\left(\frac{y-e}{a}\right)^{\frac{1}{b}}`
"""
return ((y-e)/a)**(1/b)
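# Worked example (illustrative): pwr_x inverts pwr_y, so with a=3, b=2, e=1 and
# y=13 it recovers x = ((13 - 1) / 3) ** (1 / 2) = 2.0, i.e.
# pwr_x(pwr_y(2.0, 3.0, 2.0, 1.0), 3.0, 2.0, 1.0) == 2.0.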
def fit_pwr(xdata, ydata):
"""
Define bounds for curve fitting routine and fit power law.
Parameters
----------
xdata : array-like
x data to be fitted.
ydata : array-like
y data to be fitted.
Returns
-------
popt : array-like
Optimal values for the parameters.
pcov : 2D array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate.
See also
--------
scipy.optimize.curve_fit : Non-linear least squares fit to a function.
"""
bnd = ([-np.inf, -np.inf, -ydata.max()], [np.inf, np.inf, ydata.max()])
popt, pcov = curve_fit(pwr_y, xdata, ydata, bounds = bnd)
return popt, pcov
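# Usage sketch (hypothetical data, not from the original package): fitting
# noisy synthetic measurements with the Power Law defined above.
#
#     xdata = np.linspace(1.0, 10.0, 25)
#     ydata = pwr_y(xdata, 2.0, 0.5, 0.1) + np.random.normal(0, 0.01, 25)
#     popt, pcov = fit_pwr(xdata, ydata)
#     y_fit = pwr_y(xdata, *popt)
#
# The bounds set inside fit_pwr tie the allowed deviation term e to the
# magnitude of ydata, which keeps the fit close to a pure power law.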
def fit_at_pwr(df_raw, gb_ref, gb_shift):
"""
    Obtain the shift factor between two measurement sets at different temperatures.
The raw measurement data at each temperature level are fitted by a Power Law
function. These Power Law functions improve the robustness of the
    shifting algorithm, because the fitted functions smooth outliers and bridge
possible gaps between the data sets. The intersection of the functions
is calculated and used to obtain the shift factor.
Parameters
----------
df_raw : pandas.DataFrame
Contains the processed raw measurement data.
gb_ref : int
Dataframe 'Set' number of the reference measurement set.
gb_shift : int
Dataframe 'Set' number of the measurement set that is shifted.
Returns
-------
log_aT : numeric
The decadic logarithm of the shift factor between the two measurement
sets.
Notes
-----
In certain circumstances the equilibration time between measurements at
different temperature levels can be too short to reach a steady state
leading to errors in the first data point of the measurement set.
    To account for such a situation, two Power Law fits are conducted. The first
fit contains all data points and the second fit drops the first data point.
    If dropping the data point increases the goodness of fit, the second
Power Law fit is used to calculate the shift factor.
"""
modul = df_raw.modul
if df_raw.domain == 'freq':
_modul = '{}_stor'.format(modul)
elif df_raw.domain == 'time':
_modul = '{}_relax'.format(modul)
#Get data for the reference set and the set to be shifted
gb = df_raw.groupby('Set')
ref_xdata = gb.get_group(gb_ref)['f_set'].values
ref_ydata = gb.get_group(gb_ref)[_modul].values
shift_xdata = gb.get_group(gb_shift)['f_set'].values
shift_ydata = gb.get_group(gb_shift)[_modul].values
#Curve fit power law
ref_popt, ref_pcov = fit_pwr(ref_xdata, ref_ydata)
shift_popt, shift_pcov = fit_pwr(shift_xdata, shift_ydata)
#Check and remove first measurement point if outlier
ref_popt_rem, ref_pcov_rem = fit_pwr(ref_xdata[1:], ref_ydata[1:])
perr = np.sqrt(np.abs(np.diag(ref_pcov)))
perr_rem = np.sqrt(np.abs(np.diag(ref_pcov_rem)))
if all(perr_rem < perr):
ref_popt = ref_popt_rem
ref_xdata = ref_xdata[1:]
ref_ydata = ref_ydata[1:]
shift_popt_rem, shift_pcov_rem = fit_pwr(shift_xdata[1:], shift_ydata[1:])
perr = np.sqrt(np.abs(np.diag(shift_pcov)))
perr_rem = np.sqrt(np.abs(np.diag(shift_pcov_rem)))
if all(perr_rem < perr):
shift_popt = shift_popt_rem
shift_xdata = shift_xdata[1:]
shift_ydata = shift_ydata[1:]
#Calculate fit
ref_ydata_fit = pwr_y(ref_xdata, *ref_popt)
shift_ydata_fit = pwr_y(shift_xdata, *shift_popt)
#Get interpolation or extrapolation range
if ref_ydata_fit.max() > shift_ydata_fit.max():
#Ref is on top
top_xdata = ref_xdata
top_ydata = ref_ydata
top_popt = ref_popt
bot_xdata = shift_xdata
bot_ydata = shift_ydata
bot_popt = shift_popt
sign = 1
else:
#Shift is on top
top_xdata = shift_xdata
top_ydata = shift_ydata
top_popt = shift_popt
bot_xdata = ref_xdata
bot_ydata = ref_ydata
bot_popt = ref_popt
sign = -1
if top_ydata.min() < bot_ydata.max():
#overlap range
ymin = top_ydata.min()
ymax = bot_ydata.max()
else:
#gap range
ymin = bot_ydata.max()
ymax = top_ydata.min()
#Define three points along inter/extrapolation range
ymid = (ymin+ymax)/2
y = np.array([ymin, ymid, ymax])
#Compute average shift factor for the three points
x_top = pwr_x(y, *top_popt)
x_bot = pwr_x(y, *bot_popt)
#Calculate shift factor
log_aT = sign * np.log10(x_top/x_bot).mean()
return log_aT
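# Usage sketch (assumed setup): with a prepared df_raw, the shift factor of
# measurement set 2 relative to reference set 1 could be obtained as
#
#     log_aT = fit_at_pwr(df_raw, gb_ref=1, gb_shift=2)
#
# The sign of log_aT encodes whether the shifted set has to move to higher or
# lower frequencies to line up with the reference set.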
def get_aT(df_raw, RefT):
"""
Get shift factors for each temperature level in the raw measurement data.
A reference temperature is specified for which the master curve is created.
    Measurement sets below the desired reference temperature are shifted to
lower frequencies (longer time periods), whereas measurement sets at
temperatures higher than the reference temperature are shifted to higher
frequencies (shorter time periods).
Parameters
----------
df_raw : pandas.DataFrame
Contains the processed raw measurement data.
RefT : int or float
        Reference temperature of the master curve in Celsius.
Returns
-------
df_aT : pandas.DataFrame
Contains the decadic logarithm of the shift factors 'log_aT'
and the corresponding temperature values 'T' in degree Celsius.
See also
--------
load.Eplexor_raw : Returns df_raw from Eplexor Excel file.
load.user_raw: Returns df_raw from csv file.
"""
#Create df_aT
Temp = []
for i, df_set in df_raw.groupby('Set'):
T = df_set['T_round'].iloc[0]
Temp.append(T)
if T == RefT:
idx = i
    df_aT = pd.DataFrame(Temp, columns=['T'])
from sklearn.metrics.ranking import roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Activation
from imblearn.keras import balanced_batch_generator
from imblearn.under_sampling import NearMiss
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import shap
import tqdm
import os
class Trainer:
"""Manages the training of deep neural networks. For each species five models are trained, and the model which has
scored the highest AUC value on the testing dataset is subsequently saved to file. Using a random subset of the
test set the impact of the environmental layers is estimated and saved to an image file. The class also creates an
evaluation file containing various (average) performance metrics per species like accuracy, loss, AUC, etc.
:param oh: an Occurrence object: holds occurrence files and tables
:param gh: a GIS object: holds path and file names required for computation of gis data.
    :param ch: a Config object: holds instance variables that determine the random seed, batch size, number of epochs,
    number of nodes in each model layer, and the dropout rate for each layer.
:param verbose: a boolean: prints a progress bar if True, silent if False
:return: Object. Used to train five models, and save the one with the highest AUC score to file.
Performed by calling class method train on Trainer object.
"""
def __init__(self, oh, gh, ch, verbose):
self.oh = oh
self.gh = gh
self.ch = ch
self.verbose = verbose
self.spec = ''
self.variables = []
self.test_loss = []
self.test_acc = []
self.test_AUC = []
self.test_tpr = []
self.test_uci = []
self.test_lci = []
self.best_model_auc = [0]
self.occ_len = 0
self.abs_len = 0
self.random_seed = self.ch.random_seed
self.batch = self.ch.batchsize
self.epoch = self.ch.epoch
self.model_layers = self.ch.model_layers
self.model_dropout = self.ch.model_dropout
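    # Usage sketch (assumed wiring, not part of the original class): the object
    # is constructed from the package's occurrence, GIS and config handlers,
    # e.g.
    #
    #     trainer = Trainer(oh, gh, ch, verbose=True)
    #     trainer.create_eval()
    #
    # after which the driver method mentioned in the class docstring is assumed
    # to loop over species and call create_input_data(),
    # create_model_architecture(), train_model() and validate_model() in turn.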
def create_eval(self):
"""Creates a new eval file containing a basic column layout.
:return: None. Writes column layout to evaluation file. Overwrites previous evaluation file if present.
"""
if not os.path.isdir(self.ch.result_path + '/_DNN_performance'):
os.makedirs(self.ch.result_path + '/_DNN_performance')
with open(self.ch.result_path + '/_DNN_performance/DNN_eval.txt', 'w+') as file:
file.write(
"Species" + "\t" + "Test_loss" + "\t" + "Test_acc" + "\t" + "Test_tpr" + "\t" + "Test_AUC" + "\t" + "Test_LCI95%" + "\t" + "Test_UCI95%" + "\t" + "occ_samples" + "\t" + "abs_samples" + "\n")
file.close()
def create_input_data(self):
"""Loads data from file and performs final data preparations before training. Returns all input data for
training the model and determining variable importance.
:return: Tuple. Containing:
array 'X' an array containing all occurrences for a certain species;
array 'X_train' an array containing all training data for a certain species;
array 'X_test' an array containing all testing data for a certain species;
array 'y_train' an array holding all the labels (ground truth, in this case absent=0 / present=1) for the
training set;
array 'y_test' an array holding all the labels for the test set;
table 'test_set' pandas dataframe containing a copy of array 'X_test';
array 'shuffled_X_train' an array containing a random subset of the X_train data;
array 'shuffled_X_test' an array containing a random subset of the X_test data.
"""
np.random.seed(self.random_seed)
self.variables = self.gh.names.copy()
self.variables.remove("%s_presence_map" % self.spec)
table = pd.read_csv(self.gh.spec_ppa_env + '/%s_env_dataframe.csv' % self.spec)
table = table.drop('%s_presence_map' % self.spec, axis=1)
table = table.dropna(axis=0, how="any")
band_columns = [column for column in table.columns[1:len(self.gh.names)]]
X = []
y = []
for _, row in table.iterrows():
x = row[band_columns].values
x = x.tolist()
x.append(row["present/pseudo_absent"])
X.append(x)
df = pd.DataFrame(data=X, columns=band_columns + ["presence"])
df.to_csv(self.gh.root + '/filtered.csv', index=None)
self.occ_len = int(len(df[df["presence"] == 1]))
self.abs_len = int(len(df[df["presence"] == 0]))
X = []
y = []
band_columns = [column for column in df.columns[:-1]]
for _, row in df.iterrows():
X.append(row[band_columns].values.tolist())
y.append([1 - row["presence"], row["presence"]])
X = np.vstack(X)
y = np.vstack(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, stratify=y, random_state=self.random_seed)
test_set = pd.DataFrame(X_test)
test_set.rename(columns=dict(zip(test_set.columns[0:len(self.gh.names) - 1], self.variables)), inplace=True)
shuffled_X_train = X_train.copy()
np.random.shuffle(shuffled_X_train)
shuffled_X_train = shuffled_X_train[:1000]
shuffled_X_test = X_test.copy()
np.random.shuffle(shuffled_X_test)
shuffled_X_test = shuffled_X_test[:1000]
return X, X_train, X_test, y_train, y_test, test_set, shuffled_X_train, shuffled_X_test
def create_model_architecture(self, X):
"""Creates a model architecture based on instance variables 'model_layers' and 'models_dropout'. Default is a
model with 4 layers [250, 200, 150 and 100 nodes per layer] with dropout [0.5, 0.3, 0.5, 0.3 per layer]
:param X: Array. used to define the input dimensions (number of nodes in the first layer) of the model.
:return: Keras Model Object. Initialized model with a specific architecture ready for training.
"""
num_classes = 2
num_inputs = X.shape[1]
model = Sequential()
if len(self.model_layers) == len(self.model_dropout):
for l in range(len(self.model_layers)):
layer = Dense(self.model_layers[l], activation='relu', input_shape=(num_inputs,))
model.add(layer)
model.add(Dropout(self.model_dropout[l]))
out_layer = Dense(num_classes, activation=None)
model.add(out_layer)
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
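    # Example (matches the defaults described above): with model_layers =
    # [250, 200, 150, 100] and model_dropout = [0.5, 0.3, 0.5, 0.3] the loop
    # builds Dense(250)+Dropout(0.5), Dense(200)+Dropout(0.3),
    # Dense(150)+Dropout(0.5) and Dense(100)+Dropout(0.3), followed by a
    # two-node softmax output compiled with Adam(lr=0.001) and categorical
    # cross-entropy.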
def train_model(self, model, X_train, X_test, y_train, y_test):
"""Training a model to predict the presence or absence of a species. Various instance variables are used to
define how the model trains, like: batch size, random seed and number of epochs.
:param model: Keras Model Object. Initialized model ready for training.
:param X_train: Array. Contains training data.
:param X_test: Array. Contains testing data.
:param y_train: Array. Contains training (ground truth) labels.
:param y_test: Array. Contains testing (ground truth) labels.
:return: Tuple. Containing:
float 'AUC' performance metric between 0 and 1 (0 = 100% wrong, 1 = 100% right);
keras model 'model' a keras model with an identical architecture to the input variable 'model' but with trained
weights.
"""
training_generator, steps_per_epoch = balanced_batch_generator(X_train, y_train, sampler=NearMiss(),
batch_size=self.batch, random_state=self.random_seed)
model.fit_generator(generator=training_generator, steps_per_epoch=steps_per_epoch, epochs=self.epoch,
verbose=0)
score = model.evaluate(X_test, y_test, verbose=0)
predictions = model.predict(X_test)
fpr, tpr, thresholds = roc_curve(y_test[:, 1], predictions[:, 1])
len_tpr = int(len(tpr) / 2)
self.test_loss.append(score[0])
self.test_acc.append(score[1])
self.test_AUC.append(roc_auc_score(y_test[:, 1], predictions[:, 1]))
self.test_tpr.append(tpr[len_tpr])
AUC = roc_auc_score(y_test[:, 1], predictions[:, 1])
n_bootstraps = 1000
y_pred = predictions[:, 1]
y_true = y_test[:, 1]
bootstrapped_scores = []
rng = np.random.RandomState(self.random_seed)
for i in range(n_bootstraps):
indices = rng.randint(0, len(y_pred) - 1, len(y_pred))
if len(np.unique(y_true[indices])) < 2:
continue
score = roc_auc_score(y_true[indices], y_pred[indices])
bootstrapped_scores.append(score)
sorted_scores = np.array(bootstrapped_scores)
sorted_scores.sort()
ci_lower = sorted_scores[int(0.05 * len(sorted_scores))]
ci_upper = sorted_scores[int(0.95 * len(sorted_scores))]
self.test_lci.append(ci_lower)
self.test_uci.append(ci_upper)
return AUC, model
    def validate_model(self, model, AUC, X_train, X_test, shuffled_X_train, shuffled_X_test, test_set):
        """Validate the model based on the AUC score. If the current model's AUC is higher than that of previous
        version(s), the model is saved to file, and the feature importance for the current model is calculated and
        subsequently saved to an image.
:param model: Keras Model. Model object trained using the class method train_model.
:param AUC: Float. Performance metric with a score between 0 and 1 (0 = 100% wrong, 1 = 100% right).
:param X_train: Array. Contains training data.
:param X_test: Array. Contains testing data.
:param shuffled_X_train: an array containing a random subset of the X_train data;
:param shuffled_X_test:an array containing a random subset of the X_test data;
:param test_set: pandas dataframe containing a copy of array 'X_test';
:return: None. Does not return value or object, instead saves a model to file if the AUC score is higher than
previous versions. If a model is saved to file the feature importance is tested and also saved to an image file.
"""
if AUC > self.best_model_auc[0]:
if not os.path.isdir(self.ch.result_path + '/%s' % self.spec):
os.makedirs(self.ch.result_path + '/%s' % self.spec, exist_ok=True)
self.best_model_auc[0] = AUC
model_json = model.to_json()
with open(self.ch.result_path + '/{}/{}_model.json'.format(self.spec, self.spec), 'w') as json_file:
json_file.write(model_json)
model.save_weights(self.ch.result_path + '/{}/{}_model.h5'.format(self.spec, self.spec))
if int(len(X_train)) > 5000:
explainer = shap.DeepExplainer(model, shuffled_X_train)
                test_set = pd.DataFrame(shuffled_X_test)
"""
Test for utility class functionality
"""
from core.utility import Utility
import pandas as pd
import numpy as np
d1 = [4,3,2,1]
d2 = [1,2,3,4]
d3 = [2,np.nan,6,8]
df = pd.DataFrame()
import numpy as np
import pandas as pd
import itertools
import collections
def findDuplicates(N, L, MOI):
'''
This function takes as an input the number of cells (N), the library size (L)
and the average MOI for the virus and returns the number of duplicate cells.
'''
n_tags_per_cell = np.random.poisson(MOI, N)
cell_tags = np.empty(N, dtype=object)
rr = range(1, L + 1)
for i in range(N):
cell_tag_digits = np.random.randint(1, L + 1, size = n_tags_per_cell[i])
cell_tags[i] = list(cell_tag_digits)
num_duplicates = 0
seen = set()
for x in cell_tags:
if len(x) > 1:
srtd = tuple(sorted(set(x)))
if srtd not in seen:
seen.add(srtd)
else:
num_duplicates += 1
return num_duplicates
def runDuplicatesExperiment(n_experiments, N, L, MOI):
'''
    Run the duplicate-counting experiment n_experiments times and return the duplicate fraction (duplicates/N) for each run.
'''
n_experiments = int(n_experiments)
N = int(N)
L = int(L)
MOI = int(MOI)
duplicates_experiment = np.empty(n_experiments)
for i in range(int(n_experiments)):
duplicates_experiment[i] = findDuplicates(N, L, MOI)
#duplicates_experiment_normalized = duplicates_experiment/N * 100
duplicates_experiment_normalized = duplicates_experiment/N
return duplicates_experiment_normalized
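# Hedged usage sketch (the parameter values below are illustrative assumptions, not
# values taken from this project):
#
#     fractions = runDuplicatesExperiment(n_experiments=100, N=10000, L=20000, MOI=3)
#     print(fractions.mean())   # average fraction of cells flagged as CellTag-signature duplicates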
def initialCellPopulation(N, L, MOI):
'''
Create the starting cell population with the CellTags.
'''
n_tags_per_cell = np.random.poisson(MOI, N)
cell_tags = np.empty(N, dtype=object)
for i in range(N):
cell_tag_digits = np.random.randint(1, L + 1, size = n_tags_per_cell[i])
cell_tags[i] = list(cell_tag_digits)
seen = set()
clones = {}
for x in cell_tags:
srtd = tuple(sorted(set(x)))
if srtd not in seen:
seen.add(srtd)
clones[srtd] = 1
else:
clones[srtd] += 1
return clones
def runClonalSimulation(N, L, MOI, division_rate, passage_rate, passage_fraction, sequence_time):
'''
Returns clonesDistribution:
1. First row is the number of cell tags in each clone
2. Each subsequent row is the number of cells in each clone
3. The number of columns is the number of different clones
'''
clones = initialCellPopulation(N, L, MOI)
clonesDistribution = np.zeros((int(sequence_time/division_rate) + 2, len(clones)), dtype=int)
x = 0
for seq in clones:
clonesDistribution[0][x] = len(seq)
clonesDistribution[1][x] = clones[seq]
x += 1
dividing_times = list(range(division_rate, (int(sequence_time/division_rate) + 1) * division_rate, division_rate))
passage_times = list(range(passage_rate, (int(sequence_time/passage_rate) + 1) * passage_rate, passage_rate))
all_times = dividing_times + passage_times
all_times = list(set(all_times))
all_times.sort()
y = 2
for i in range(len(all_times)):
if all_times[i] in dividing_times and all_times[i] in passage_times:
# Double the cells first
clonesDistribution[y][:] = clonesDistribution[y - 1][:] * 2
# Passage the Cells
clonesDistribution[y][:] = np.random.binomial(clonesDistribution[y][:], passage_fraction)
y += 1
else:
if all_times[i] in dividing_times:
clonesDistribution[y][:] = clonesDistribution[y - 1][:] * 2
y += 1
if all_times[i] in passage_times:
clonesDistribution[y - 1][:] = np.random.binomial(clonesDistribution[y - 1][:], passage_fraction)
return clonesDistribution
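# Hedged illustration of the clonesDistribution layout returned above (numbers are
# made up): for three clones carrying 2, 1 and 3 CellTags respectively,
#
#     row 0  -> [2, 1, 3]    # number of CellTags defining each clone
#     row 1  -> [1, 1, 1]    # initial cell count per clone
#     rows 2.. -> cell counts after each division/passage step up to sequence_time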
def meanClonePopulation(n_simulations, N, L, MOI, division_rate, passage_rate, passage_fraction, sequence_time):
mean_clones = np.zeros(n_simulations)
for i in range(n_simulations):
cDist = runClonalSimulation(N, L, MOI, division_rate, passage_rate, passage_fraction, sequence_time)
final_clone_distribution = cDist[-1][:]
mean_clones[i] = np.mean(final_clone_distribution)
return mean_clones
def meanSequencedCloneSize(n_simulations, N, L, MOI, division_rate, passage_rate, passage_fraction, sequence_time,
n_cells_sequenced, threshold_clone_size):
n_simulations = int(n_simulations)
N = int(N)
L = int(L)
MOI = int(MOI)
division_rate = int(division_rate)
passage_rate = int(passage_rate)
sequence_time = int(sequence_time)
n_cells_sequenced = int(n_cells_sequenced)
threshold_clone_size = int(threshold_clone_size)
mean_clones = np.zeros(n_simulations)
mean_clones_above_threshold = np.zeros(n_simulations)
for i in range(n_simulations):
cDist = runClonalSimulation(N, L, MOI, division_rate, passage_rate, passage_fraction, sequence_time)
        df = pd.DataFrame()
from typing import List, Dict, Union
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
import h5py
def extract_result(results: Dict, key: str) -> pd.Series:
    df = pd.concat({(int(res['hp_ix']), int(bs_ix), k): pd.DataFrame(v, index=[0])
### Batch 1 of data: 1. sensitive corpus (phrases); 2. raw Weibo comments (senti100k, unprocessed); 6754 samples each, test-set ratio 0.1
import pandas as pd
df_1 = pd.read_excel('/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/网络信息语料 文德 20210122.xlsx', sheet_name='测试集')
df_0 = pd.read_csv('/Users/leo/Data/项目数据/文德数慧-文本内容审核/分类实验/数据/weibo_senti_100k.csv')
df_0 = df_0.sample(n=6754).reset_index(drop=True)
assert len(df_0) == len(df_1)
data = pd.DataFrame(columns=['label','text'])
for i in range(len(df_0)):
label = 0
text = df_0.iloc[i]['review']
    data = data.append(pd.DataFrame({'label':[label],'text':[text]}))
"""
Author: <EMAIL> / <EMAIL>
Purpose: ease OCT image access and analyses
"""
import pandas as pd
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import plotly.graph_objects as go
class TopconSegmentationData:
# extract thickness data from OCT segmentation file by Topcon Triton data collector
def __init__(self, oct_data, scan_number):
self.df = oct_data
self.scan_number = scan_number
(self.rnfl,
self.gclp,
self.gclpp,
self.retina,
self.choroid) = self.find_scan_data(self.df, self.scan_number)
self.scan_quality = self.get_quality(self.df, self.scan_number)
@staticmethod
def get_quality(df, scan_number):
data_no_col_idx = df.loc[0, :].index[df.loc[0, :].values == 'Data No.'][0]
idx = df.iloc[:, data_no_col_idx][df.iloc[:, data_no_col_idx].values == str(scan_number)].index[0]
quality_col_idx = df.loc[0, :].index[df.loc[0, :].values == 'TopQ Image Quality'][0]
return df.iloc[idx, quality_col_idx]
@staticmethod
def find_scan_data(df, scan_number):
data_no_col_idx = df.loc[0, :].index[df.loc[0, :].values == 'Data No.'][0]
idx = df.iloc[:, data_no_col_idx][df.iloc[:, data_no_col_idx].values == str(scan_number)].index[0]
# find rnfl and extract data
idx_rnfl = df.iloc[idx:, 0][df.iloc[idx:, 0].values == 'RNFL'].index[0]
rnfl = df.iloc[idx_rnfl + 1: idx_rnfl + 257, 1:].reset_index(drop=True)
rnfl.columns = [np.arange(0, 512)]
# find GCL+ aka gclp
idx_gclp = df.iloc[idx:, 0][df.iloc[idx:, 0].values == 'GCL+'].index[0]
gclp = df.iloc[idx_gclp + 1: idx_gclp + 257, 1:].reset_index(drop=True)
gclp.columns = [np.arange(0, 512)]
# find GCL+ aka gclpp
idx_gclpp = df.iloc[idx:, 0][df.iloc[idx:, 0].values == 'GCL++'].index[0]
gclpp = df.iloc[idx_gclpp + 1: idx_gclpp + 257, 1:].reset_index(drop=True)
gclpp.columns = [np.arange(0, 512)]
# find Retina
idx_retina = df.iloc[idx:, 0][df.iloc[idx:, 0].values == 'Retina'].index[0]
retina = df.iloc[idx_retina + 1: idx_retina + 257, 1:].reset_index(drop=True)
retina.columns = [np.arange(0, 512)]
# find Choroid
idx_choroid = df.iloc[idx:, 0][df.iloc[idx:, 0].values == 'Choroid'].index[0]
choroid = df.iloc[idx_choroid + 1: idx_choroid + 257, 1:].reset_index(drop=True)
choroid.columns = [np.arange(0, 512)]
return rnfl, gclp, gclpp, retina, choroid
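    # Hedged usage sketch (the file name and scan number are assumptions, not taken
    # from this module):
    #
    #     oct_df = pd.read_csv('topcon_segmentation_export.csv', header=None, dtype=str)
    #     scan = TopconSegmentationData(oct_df, scan_number=1)
    #     print(scan.scan_quality, scan.choroid.shape)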
@staticmethod
def create_averaging_figures(mean, std, max_data, min_data, px_id, quality, scan_type, scan_ids, fp):
# creating figure choroid thickness figures
# direct view
im_output_fn_d = (scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] +
'_mean_direct_view_choroid_thickness.png')
# # display choroid values
fig1 = go.Figure(data=[go.Surface(z=mean)])
fig1_title_name = ('PX_id: ' + str(px_id) + ', Scan Nr: ' + str(scan_ids) +
', Quality: ' + str(quality))
fig1.update_layout(title=fig1_title_name, autosize=False,
scene_camera_eye=dict(x=0.5, y=-10, z=-20),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig1.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 512], ),
yaxis=dict(nticks=10, range=[0, 256], ),
zaxis=dict(nticks=10, range=[0, 500], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=12, y=9, z=10)
)
fig1.write_image(os.path.join(fp, 'images', im_output_fn_d))
im_output_fn_i = (scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] +
'_mean_indirect_view_hist_minmax_choroid_thickness.png')
# indirect view
fig2 = go.Figure(data=[go.Surface(z=mean)])
fig2_title_name = ('PX_id: ' + str(px_id) + ', Scan Nr: ' + str(scan_ids) +
', Quality: ' + str(quality))
fig2.update_layout(title=fig2_title_name, autosize=False,
scene_camera_eye=dict(x=0.5, y=-10, z=20),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig2.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 512], ),
yaxis=dict(nticks=10, range=[0, 256], ),
zaxis=dict(nticks=10, range=[0, 500], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=12, y=9, z=10)
)
fig2.write_image(os.path.join(fp, 'images', im_output_fn_i))
# std of delta
fig3_output_fn = scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] + '_std_choroid_thickness.png'
# # display choroid values
fig3 = go.Figure(data=[go.Surface(z=std)])
fig3_title_name = ('PX_id: ' + str(px_id) + ', Scan Nr: ' + str(scan_ids) +
', Quality: ' + str(quality))
fig3.update_layout(title=fig3_title_name, autosize=False,
scene_camera_eye=dict(x=0, y=-10, z=20),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig3.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 512], ),
yaxis=dict(nticks=10, range=[0, 256], ),
zaxis=dict(nticks=10, range=[0, 100], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=12, y=9, z=10)
)
fig3.write_image(os.path.join(fp, 'images', fig3_output_fn))
# hist of fig 3
fig4_output_fn = scan_type.split('_')[-1] + '_' + scan_type.split('_')[
-2] + '_hist_std_choroid_thickness.png'
disp_data_all_choroids_std = std.flatten()
disp_data_all_choroids_std[disp_data_all_choroids_std > 40] = 40
fig4 = plt.figure(figsize=(14, 5), dpi=80)
fig4 = plt.hist(disp_data_all_choroids_std, 40)
fig4 = plt.title('Std; mean: ' + str(np.round(np.nanmean(std), 4)) + ', std: ' +
str(np.round(np.nanstd(std), 4)), fontsize=30)
plt.savefig(os.path.join(fp, 'images', fig4_output_fn))
fig5_output_fn = scan_type.split('_')[-1] + '_' + scan_type.split('_')[
-2] + '_minmax_choroid_thickness.png'
# # display choroid values
fig5 = go.Figure(data=[go.Surface(z=(max_data - min_data))])
fig5_title_name = ('PX_id: ' + ', max-min values ' + str(scan_ids) +
', Quality: ' + str(quality))
fig5.update_layout(title=fig5_title_name, autosize=False,
scene_camera_eye=dict(x=0, y=-10, z=20),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig5.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 512], ),
yaxis=dict(nticks=10, range=[0, 256], ),
zaxis=dict(nticks=10, range=[0, 200], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=12, y=9, z=10)
)
fig5.write_image(os.path.join(fp, 'images', fig5_output_fn))
fig6_output_fn = scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] + \
'_hist_minmax_choroid_thickness.png'
min_max = max_data - min_data
min_max = min_max.flatten()
min_max[min_max > 40] = 40
fig6 = plt.figure(figsize=(14, 5), dpi=80)
fig6 = plt.hist(min_max, 40)
fig6 = plt.title('Max - min values mean: ' + str(np.round(np.nanmean(min_max), 4)) + ', std: ' +
str(np.round(np.nanstd(min_max), 4)), fontsize=30)
plt.savefig(os.path.join(fp, 'images', fig6_output_fn))
        # create one summary figure
fig_sum = plt.figure(figsize=(16, 20), dpi=300, facecolor='blue', edgecolor='k')
grid = plt.GridSpec(20, 16, wspace=0, hspace=0)
        # From this we can specify subplot locations and extents using the familiar Python slicing syntax:
fig1_sum = plt.subplot(grid[0:8, 0:8])
fig1_sum = plt.axis('off')
fig1_sum = plt.imshow(mpimg.imread(os.path.join(fp, 'images', im_output_fn_d)))
fig2_sum = plt.subplot(grid[0:8, 8:16])
fig2_sum = plt.axis('off')
fig2_sum = plt.imshow(mpimg.imread(os.path.join(fp, 'images', im_output_fn_i)))
fig3_sum = plt.subplot(grid[8:16, 0:8])
fig3_sum = plt.axis('off')
fig3_sum= plt.imshow(mpimg.imread(os.path.join(fp, 'images', scan_type.split('_')[-1] + '_' +
scan_type.split('_')[-2] +'_std_choroid_thickness.png')))
fig4_sum = plt.subplot(grid[8:16, 8:16])
fig4_sum = plt.axis('off')
fig4_sum = plt.imshow(mpimg.imread(os.path.join(fp, 'images', scan_type.split('_')[-1] + '_' +
scan_type.split('_')[-2] + '_minmax_choroid_thickness.png')))
fig5_sum = plt.subplot(grid[16:19, 0:8])
fig5_sum = plt.axis('off')
fig5_sum = plt.imshow(mpimg.imread(os.path.join(fp, 'images', scan_type.split('_')[-1] + '_' +
scan_type.split('_')[-2] + '_hist_std_choroid_thickness.png')))
fig6_sum = plt.subplot(grid[16:19, 8:16])
fig6_sum = plt.axis('off')
fig6_sum = plt.imshow(mpimg.imread(os.path.join(fp, 'images', scan_type.split('_')[-1] + '_' +
scan_type.split('_')[-2] + '_hist_minmax_choroid_thickness.png')))
sum_output_fn = scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] + '_summery_reg.png'
fig_sum.savefig(os.path.join(fp, sum_output_fn))
@staticmethod
def create_rnfl_avg_figures(mean_thickness, px_id, quality, scan_type, scan_ids, fp, fp_oct):
# creating figure choroid thickness figures
# direct view
im_output_fn_d = (str(px_id) + scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] +
'_rnfl_thickness_all.png')
# # display choroid values
fig1 = go.Figure(data=[go.Surface(z=abs(-mean_thickness))])
title_name = ('PX_id: ' + str(px_id) + ', Scan Nr: ' + str(scan_ids) +
', Quality: ' + str(quality))
fig1.update_layout(title=title_name, autosize=False,
scene_camera_eye=dict(x=0.1, y=-10, z=20),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig1.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 512], ),
yaxis=dict(nticks=10, range=[0, 256], ),
zaxis=dict(nticks=10, range=[0, 300], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=12, y=9, z=10)
)
fig1.write_image(os.path.join(fp_oct, 'images\\rnfl', im_output_fn_d))
fig1.write_image(os.path.join(fp, 'images', im_output_fn_d))
im_output_fn_i = (str(px_id) + scan_type.split('_')[-1] + '_' + scan_type.split('_')[-2] +
'_rnfl_thickness_macula.png')
# indirect view
fig2 = go.Figure(data=[go.Surface(z=abs(-mean_thickness[50:200, 175:325]))])
title_name = ('PX_id: ' + str(px_id) + ', Scan Nr: ' + str(scan_ids) +
', Quality: ' + str(quality))
fig2.update_layout(title=title_name, autosize=False,
scene_camera_eye=dict(x=0.1, y=-3, z=10),
width=900, height=900,
margin=dict(l=50, r=50, b=50, t=50))
fig2.update_layout(scene=dict(xaxis=dict(nticks=10, range=[0, 150], ),
yaxis=dict(nticks=10, range=[0, 150], ),
zaxis=dict(nticks=10, range=[-1, 150], ), ),
scene_aspectmode='manual',
scene_aspectratio=dict(x=9, y=9, z=10)
)
fig2.write_image(os.path.join(fp_oct, 'images\\rnfl', im_output_fn_i))
fig2.write_image(os.path.join(fp, 'images', im_output_fn_i))
@staticmethod
def macula_pos_from_rnfl(fp_fn_logbook, px_ids, scan_types, path_oct):
px_meta = OctDataAccess(fp_fn_logbook, px_ids, scan_types, path_oct)
columns_names = ['px_id', 'scan_type', 'macula_row_pos', 'macula_col_pos']
        order_test_meta = pd.DataFrame(columns=columns_names)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/8/28 21:26
Desc: Eastmoney (东方财富网) - quotes homepage - Shanghai A-shares - daily quotes
Fetches the trading data for the most recent trading day.
Usage example (run the main function directly to fetch the most recent trading day's data):
main()
"""
import time
import json
import pandas as pd
import os
from data_urls import a_detail_url as url
from comm_funcs import requests_get
from comm_funcs import get_config
from comm_funcs import find_trade_date
from comm_funcs import get_db_engine_for_pandas
def main(page_size: int = 6000) -> pd.DataFrame:
"""
    Fetch per-stock trading data for A-shares for the current trading day.
    :param page_size: number of records to pull in one request
:return: pandas.DataFrame
"""
res = json.loads(requests_get(url(psize=page_size)))
if "data" in res and "diff" in res["data"]:
data = res['data']['diff']
detail_df = pd.DataFrame(data)
save_df = detail_df.loc[:, ['f12', 'f14', 'f17', 'f15', 'f16', 'f2', 'f18', 'f4', 'f3',
'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f20', 'f21']]
        # Drop stocks that had no trades
find_index = save_df[save_df["f15"] == "-"].index
save_df.drop(index=find_index, inplace=True)
save_df.columns = [
'股票代码',
'股票名称',
'开盘',
'最高',
'最低',
'最新价',
'昨收',
'涨跌额',
'涨跌幅',
'成交量',
'成交额',
'振幅',
'换手率',
'市盈率',
'量比',
'流通市值',
'总市值'
]
save_df["开盘"] = pd.to_numeric(save_df["开盘"], errors="coerce")
save_df["最高"] = pd.to_numeric(save_df["最高"], errors="coerce")
save_df["最低"] = pd.to_numeric(save_df["最低"], errors="coerce")
save_df["最新价"] = pd.to_numeric(save_df["最新价"], errors="coerce")
save_df["昨收"] = pd.to_numeric(save_df["昨收"], errors="coerce")
save_df["涨跌额"] = pd.to_numeric(save_df["涨跌额"], errors="coerce")
save_df["涨跌幅"] = pd.to_ | numeric(save_df["涨跌幅"], errors="coerce") | pandas.to_numeric |
import pandas as pd
import streamlit as st
@st.cache(suppress_st_warning=True)
def load_zero_data(fast_file) -> pd.DataFrame:
"""
Load a Zero Fasting data export CSV file and return a pandas DataFrame version of the file.
    DataFrame is reindexed chronologically, oldest to newest, before being returned.
Args:
fast_file: file to be converted to a DataFrame.
    Returns: pandas DataFrame of fast data.
"""
expected_cols = ['Date', 'Start', 'End', 'Hours', 'Night Eating']
try:
fasts = pd.read_csv(fast_file,
header=0,
parse_dates=['Date'],
usecols=expected_cols)
except ValueError:
st.error(f"""
Incorrect format of fast data CSV. Please make sure you uploaded the correct file.
\nThe following columns must be present in the first row:
\n {expected_cols}
""")
raise
fasts = fasts.iloc[::-1].reset_index(drop=True) # order by oldest to newest
return fasts
@st.cache(suppress_st_warning=True)
def fasts_start_end(fasts: pd.DataFrame) -> pd.DataFrame:
"""
Calculate the start and end datetimes of each logged fast from a file exported from Zero Fasting.
Args:
fasts: The Zero Fasting file export as a pandas DataFrame.
    Returns: A DataFrame with the start and end datetimes as individual columns.
"""
start_times = pd.to_datetime(fasts['Start']).dt.strftime('%H:%M:%S')
start_times_duration = pd.to_timedelta(start_times)
end_times = pd.to_datetime(fasts['End']).dt.strftime('%H:%M:%S')
end_times_duration = pd.to_timedelta(end_times)
start_dates = fasts.Date
start_dt = (start_dates + start_times_duration).rename('start_dt')
end_dates = (start_dt + pd.to_timedelta(fasts.Hours, 'H')).dt.date
end_dt = (pd.to_datetime(end_dates) + end_times_duration).rename('end_dt')
    start_end = pd.concat([start_dt, end_dt], axis=1)
    return start_end
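# Hedged usage sketch (the file name is an assumption; in the Streamlit app these
# functions are typically fed an uploaded file object):
#
#     fasts = load_zero_data('zero_fasting_export.csv')
#     start_end = fasts_start_end(fasts)
#     print(start_end.head())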
import os
""" First change the following directory link to where all input files do exist """
os.chdir("D:\\Book writing\\Codes\\Chapter 2")
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
#from sklearn.model_selection import train_test_split
#from sklearn.metrics import r2_score
wine_quality = pd.read_csv("winequality-red.csv",sep=';')
# Convert whitespace in column names to "_" for easier handling
wine_quality.rename(columns=lambda x: x.replace(" ", "_"), inplace=True)
# Simple Linear Regression - chart
model = sm.OLS(wine_quality['quality'],sm.add_constant(wine_quality['alcohol'])).fit()
print (model.summary())
plt.scatter(wine_quality['alcohol'],wine_quality['quality'],label = 'Actual Data')
plt.plot(wine_quality['alcohol'],model.params[0]+model.params[1]*wine_quality['alcohol'],
c ='r',label="Regression fit")
plt.title('Wine Quality regressed on Alchohol')
plt.xlabel('Alcohol')
plt.ylabel('Quality')
plt.show()
# Simple Linear Regression - Model fit
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
wine_quality = pd.read_csv("winequality-red.csv",sep=';')
wine_quality.rename(columns=lambda x: x.replace(" ", "_"), inplace=True)
x_train,x_test,y_train,y_test = train_test_split(wine_quality['alcohol'],wine_quality["quality"],train_size = 0.7,random_state=42)
x_train = pd.DataFrame(x_train);x_test = pd.DataFrame(x_test)
y_train = pd.DataFrame(y_train);y_test = pd.DataFrame(y_test)
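# A minimal sketch of how the split might be used for the "Model fit" step announced
# above (hedged: sklearn's LinearRegression is an assumption, not taken from the
# original script):
#
#     from sklearn.linear_model import LinearRegression
#     reg = LinearRegression().fit(x_train, y_train)
#     print(r2_score(y_test, reg.predict(x_test)))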
"""Read in hourly weather file."""
import os
import glob
import yaml
from datetime import datetime
from dateutil import tz
import numpy as np
import pandas as pd
import xarray as xr
from timezonefinder import TimezoneFinder
from ideotype import DATA_PATH
from ideotype.utils import CC_RH, CC_VPD
from ideotype.data_process import read_data
from ideotype.nass_process import nass_summarize
def read_wea(year_start, year_end, climate_treatment=None):
"""
Read in raw hourly weather data.
- Data source: NOAA Integrated Surface Hourly Database
- Link: https://www.ncdc.noaa.gov/isd
- Weather data: temperature, RH, precipitation
- Raw data stored: ~/data/ISH/
- Output csv files stored: ~/upscale/weadata/process/
* note:
For years 1991-2010, only select data from class 1
(refer to NSRDB manual p.7-8 for more details)
- class 1: have complete period of record of 1991-2010.
- class 2: have complete period of record but with
significant periods of interpolated, filler,
or otherwise low-quality input data for solar models.
      - class 3: have some gaps in the period of record
but have at least 3 years of data.
Parameters
----------
year_start : int
year_end : int
climate_treatment : int
Create weather data for future climate projections.
2050 or 2100.
"""
# setting up np.read_fwf arguments
colnames = ['time',
'temp', 'temp_quality',
'dew_temp', 'dtemp_quality',
'precip', 'precip_time',
'precip_depth', 'precip_quality',
'precip_perhr', 'rh']
colspecs = [(15, 25), # time
(87, 92), # temp
(92, 93), # temp_quality
(93, 98), # dew_temp
(98, 99), # dtemp_quality
(105, 8193)] # precip string
# Read in relevant file paths
fpaths_wea = os.path.join(DATA_PATH, 'files', 'filepaths_wea.yml')
with open(fpaths_wea) as pfile:
dict_fpaths = yaml.safe_load(pfile)
# Read in info on conversion between WBAN & USAF id numbering system
fpath_id_conversion = os.path.join(
DATA_PATH, 'sites', dict_fpaths['id_conversion'])
df_stations = pd.read_csv(fpath_id_conversion, header=None, dtype=str)
df_stations.columns = ['WBAN', 'USAF']
# Read in stations info
fpath_stations_info = os.path.join(
DATA_PATH, 'sites', dict_fpaths['stations_info'])
    df_sites = pd.read_csv(fpath_stations_info)
import logging
from tqdm import tqdm
import pandas as pd
import kex
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
types = {
"Inspec": "Abst",
"www": "Abst",
"kdd": "Abst",
"Krapivin2009": "Full",
"SemEval2010": "Full",
"SemEval2017": "Para",
"citeulike180": "Full",
"PubMed": "Full",
"Schutz2008": "Full",
"theses100": "Full",
"fao30": "Full",
"fao780": "Full",
"Nguyen2007": "Full",
"wiki20": "Report",
"500N-KPCrowd-v1.1": "News"
}
domain = {
"Inspec": "CS",
"www": "CS",
"kdd": "CS",
"Krapivin2009": "CS",
"SemEval2010": "CS",
"SemEval2017": "-",
"citeulike180": "BI",
"PubMed": "BM",
"Schutz2008": "BM",
"theses100": "-",
"fao30": "AG",
"fao780": "AG",
"Nguyen2007": "-",
"wiki20": "CS",
"500N-KPCrowd-v1.1": "-"
}
# BI: bioinformatics
# BM: biomedical
# AG: agricultural documents
data_list = ["500N-KPCrowd-v1.1", 'Inspec', 'Krapivin2009', 'Nguyen2007', 'PubMed', 'Schutz2008', 'SemEval2010',
'SemEval2017', 'citeulike180', 'fao30', 'fao780', 'theses100', 'kdd', 'wiki20', 'www']
phraser = kex.PhraseConstructor()
def get_statistics(data: str):
""" get statistics"""
word = []
n_vocab = []
n_word = []
n_phrase = []
n_label = []
n_label_in_candidates = []
n_label_in_candidates_multi = []
label_in_candidates = []
dataset, language = kex.get_benchmark_dataset(data, keep_only_valid_label=False)
    if language != 'en':
        return
output = {'Data size': len(dataset), "Domain": domain[data], "Type": types[data]}
all_data = []
for data in tqdm(dataset):
phrase, stemmed_token = phraser.tokenize_and_stem_and_phrase(data['source'])
keywords_valid = list(set(phrase.keys()).intersection(set(data['keywords'])))
word.append(stemmed_token)
n_vocab.append(len(list(set(stemmed_token))))
n_word.append(len(stemmed_token))
n_phrase.append(len(phrase))
n_label.append(len(data['keywords']))
n_label_in_candidates.append(len(keywords_valid))
n_label_in_candidates_multi.append(len([k for k in keywords_valid if len(k.split(' ')) > 1]))
label_in_candidates.append(keywords_valid)
all_data.append(
{'filename': data['id'], 'n_phrase': len(phrase), 'n_word': len(stemmed_token),
'n_vocab': len(list(set(stemmed_token))), 'n_label': len(data['keywords']),
'n_label_in_candidate': len(keywords_valid),
'n_label_in_candidate_multi': len([k for k in keywords_valid if len(k.split(' ')) > 1])}
)
output['Avg phrase'] = sum(n_phrase) / len(dataset)
output['Std phrase'] = (sum([(a - output['Avg phrase']) ** 2 for a in n_phrase]) / len(dataset)) ** 0.5
output['Avg word'] = sum(n_word) / len(dataset)
output['Std word'] = (sum([(a - output['Avg word']) ** 2 for a in n_word]) / len(dataset)) ** 0.5
output['Avg vocab'] = sum(n_vocab) / len(dataset)
output['Std vocab'] = (sum([(a - output['Avg vocab']) ** 2 for a in n_vocab]) / len(dataset)) ** 0.5
output['Avg keyword'] = sum(n_label) / len(dataset)
    output['Std keyword'] = (sum([(a - output['Avg keyword']) ** 2 for a in n_label]) / len(dataset)) ** 0.5
output['Avg keyword (in candidate)'] = sum(n_label_in_candidates) / len(dataset)
output['Std keyword (in candidate)'] = (sum([(a - output['Avg keyword (in candidate)']) ** 2 for a in n_label_in_candidates]) / len(dataset)) ** 0.5
output['Avg keyword (in candidate & multi)'] = sum(n_label_in_candidates_multi) / len(dataset)
output['Std keyword (in candidate & multi)'] = (sum([(a - output['Avg keyword (in candidate & multi)']) ** 2 for a in n_label_in_candidates_multi]) / len(dataset)) ** 0.5
output['Vocab diversity'] = sum(n_word) / sum(n_vocab)
return output, all_data
if __name__ == '__main__':
all_stats = {}
each_data = []
for data_name in data_list:
logging.info('data: {}'.format(data_name))
a, b = get_statistics(data_name)
all_stats[data_name] = a
each_data += b
pd.DataFrame(all_stats).to_csv('./benchmark/data_statistics.csv')
    pd.DataFrame(each_data)
#!/usr/bin/env python
# coding: utf-8
# # Simple tool to analyze data from www.data.gouv.fr
#
# **Note:** This is a Jupyter notebook, which is also available as its executable export to a Python 3 script (hence the automatically generated cell-marker comments).
# # Libraries
# In[ ]:
import sys,os
addPath= [os.path.abspath("../source"),
os.path.abspath("../venv/lib/python3.9/site-packages/")]
addPath.extend(sys.path)
sys.path = addPath
# In[ ]:
# Sys import
import sys, os, re
# Common imports
import math
import numpy as NP
import numpy.random as RAND
import scipy.stats as STATS
from scipy import sparse
from scipy import linalg
# Better formatting functions
from IPython.display import display, HTML
from IPython import get_ipython
import matplotlib as MPL
import matplotlib.pyplot as PLT
import seaborn as SNS
SNS.set(font_scale=1)
# Python programming
from itertools import cycle
from time import time
import datetime
# Using pandas
import pandas as PAN
import xlrd
# In[ ]:
import warnings
warnings.filterwarnings('ignore')
print("For now, reduce python warnings, I will look into this later")
# ### Import my own modules
# The next cell attempts to give user some information if things improperly setup.
# Intended to work both in Jupyter and when executing the Python file directly.
# In[ ]:
if not get_ipython() is None and os.path.abspath("../source/") not in sys.path:
sys.path.append(os.path.abspath("../source/"))
try:
from lib.utilities import *
from lib.figureHelpers import *
from lib.DataMgrJSON import *
from lib.DataMgr import *
import lib.basicDataCTE as DCTE
except Exception as err:
print("Could not find library 'lib' with contents 'DataGouvFr' ")
if get_ipython() is None:
print("Check the PYTHONPATH environment variable which should point to 'source' wich contains 'lib'")
else:
print("You are supposed to be running in JupySessions, and '../source/lib' should exist")
raise err
# ## Check environment
#
# It is expected that:
# - your working directory is named `JupySessions`,
# - that it has subdirectories
# - `images/*` where generated images may be stored to avoid overcrowding.
# - At the same level as your working dir there should be directories
# - `../data` for storing input data and
# - `../source` for python scripts.
#
# My package library is in `../source/lib`, and users running under Python (not in Jupyter) should
# set their PYTHONPATH to include "../source" ( *or whatever appropriate* ).
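# A hedged example of that PYTHONPATH setup (shell syntax; assumes the command is run from `JupySessions`):
#
#     export PYTHONPATH="$(pwd)/../source:$PYTHONPATH"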
# In[ ]:
checkSetup(chap="Chap01")
ImgMgr = ImageMgr(chapdir="Chap01")
# # Load Data
# Vaccination data: https://www.data.gouv.fr/en/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19/
# https://www.data.gouv.fr/en/datasets/r/eb672d49-7cc7-4114-a5a1-fa6fd147406b
# https://www.data.gouv.fr/en/datasets/r/eb672d49-7cc7-4114-a5a1-fa6fd147406b
# https://www.data.gouv.fr/en/datasets/r/b234a041-b5ea-4954-889b-67e64a25ce0d
#
# Badges: vaccins covid19 (without a hyphen???)
# ## Functions
# ## Load CSV and XLSX data from remote
# The `dataFileVMgr` will manage a cache of data files in `../data`; the data will be downloaded
# from www.data.gouv.fr using a request for datasets with badge '`covid-19`' if a more recent
# version is present on the remote site. The meta information is stored/cached in `../data/.data`
# as the pickle of a json.
#
# We check what is in the cache/data directory; for each file, we identify the latest version,
# and list this below to make sure. The file name will usually contain a time stamp; this has to do with
# the version management/identification technique used when downloading from www.data.gouv.fr.
#
# For the files used in this notebook, the latest version is used/loaded irrespective of the
# timestamp used in the notebook.
# In[ ]:
tagset1 = ({"tag":"covid"}, {"tag":"covid19"})
# In[ ]:
specOpts={ 'cacheFname': '.cache.rqtTest2.json',
"dumpMetaFile" : "rqtTest2.meta.dump",
"dumpMetaInfoFile" : "rqtTest2.metainfo.dump",
'ApiInq' : 'datasets',
'ApiInqQuery' : tagset1,
'InqParmsDir' : {},
}
# In[ ]:
rex = re.compile('(.*sursaud|^donnees-hospitalieres|^covid-hospit-incid|^sp-pos-).*')
def uselFn(urqt):
return rex.match(urqt.fname) or rex.match(urqt.url)
# In[ ]:
dataFileVMgr = manageAndCacheDataFilesFRAPI("../data", maxDirSz= 170*(2**10)**2,
**specOpts)
# In[ ]:
dataFileVMgr.getRemoteInfo()
dataFileVMgr.updatePrepare()
dataFileVMgr.updateSelect(displayCount=10 , URqtSelector = uselFn)
dataFileVMgr.cacheUpdate()
# In[ ]:
print("Most recent versions of files in data directory:")
for f in dataFileVMgr.listMostRecent() :
print(f"\t{f}")
# In[ ]:
last = lambda x: dataFileVMgr.getRecentVersion(x,default=True)
# This ensures we load the most recent version, so that it is not required to update the list
# below. The timestamps shown in the following sequence will be updated by the call to `getRecentVersion`.
# In[ ]:
dailyDepCsv = last("sursaud-corona-quot-dep-2021-04-08-21h20.csv")
dailyRegionCsv = last("sursaud-corona-quot-reg-2021-04-03-19h33.csv")
dailyFranceCsv = last("sursaud-covid19-quotidien-2020-04-12-19h00-france.csv")
dailyXlsx = last("sursaud-covid19-quotidien-2020-04-12-19h00.xlsx")
weeklyCsv = last("sursaud-covid19-hebdomadaire-2020-04-08-19h00.csv")
hospAgeCsv = last("donnees-hospitalieres-classe-age-covid19-2020-04-11-19h00.csv")
hospNouveauCsv = last("donnees-hospitalieres-nouveaux-covid19-2020-04-11-19h00.csv")
hospCsv = last("donnees-hospitalieres-covid19-2020-04-11-19h00.csv")
hospEtablCsv = last("donnees-hospitalieres-etablissements-covid19-2020-04-12-19h00.csv")
weeklyLabCsv = last("sp-pos-heb-fra-2021-08-09-19h06.csv")
dailyLabCsv = last("sp-pos-quot-fra-2021-08-09-19h06.csv")
S1 = set (dataFileVMgr.listMostRecent())
S2 =set((dailyDepCsv,dailyRegionCsv,dailyFranceCsv, dailyXlsx, weeklyCsv,
hospAgeCsv, hospNouveauCsv, hospCsv, hospEtablCsv, weeklyLabCsv, dailyLabCsv ))
missing = S1. difference(S2)
if len(missing) > 0:
print (f"Missing comparing with most recent files in ../data:")
for f in missing:
print(f"\t{f}")
metaHebdoCsv = "../data/metadonnee-urgenceshos-sosmedecins-covid19-hebdo.csv"
metaQuotRegCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot-reg.csv"
metaQuotFraCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot-fra.csv"
metaQuotCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot.csv"
metaHospservices = "../data/metadonnees-services-hospitaliers-covid19.csv"
metaHospAge = "../data/metadonnees-donnees-hospitalieres-covid19-classes-age.csv"
metaHospIncid = "../data/metadonnees-hospit-incid.csv"
metaHosp = "../data/metadonnees-donnees-hospitalieres-covid19.csv"
metaHospEtabl = "../data/donnees-hospitalieres-etablissements-covid19-2020-04-11-19h00.csv"
metaSexeCsv = "../data/metadonnees-sexe.csv"
metaRegionsCsv="../data/regions-france.csv"
metaTranchesAgeCsv="../data/code-tranches-dage.csv"
# In[ ]:
ad = lambda x: "../data/"+x
S1 = set (map(ad, dataFileVMgr.listMostRecent(nonTS=True)))
S2 =set((metaHebdoCsv, metaQuotRegCsv, metaQuotFraCsv, metaQuotCsv,
metaHospservices, metaHospAge, metaHospIncid, metaHosp, metaHospEtabl, metaRegionsCsv, metaTranchesAgeCsv ))
missing = S1. difference(S2)
if len(missing) > 0:
print (f"Missing comparing with non timestamped files in ../data:")
print ("These may eventually be exploited in other notebooks (e.g. COVID-MoreData-FromGouv)")
for f in missing:
print(f"\t{f}")
# Now load the stuff
#
# In[ ]:
ad = lambda x: "../data/"+x
data_dailyRegion = read_csvPandas(ad(dailyRegionCsv), error_bad_lines=False,sep=";" )
data_dailyDep = read_csvPandas(ad(dailyDepCsv), error_bad_lines=False,sep=";")
data_dailyFrance = read_csvPandas(ad(dailyFranceCsv), error_bad_lines=False,sep=",")
data_daily = read_xlsxPandas(ad(dailyXlsx))
data_weekly = read_csvPandas(ad(weeklyCsv), error_bad_lines=False,sep=";")
data_hospNouveau = read_csvPandas(ad(hospNouveauCsv), error_bad_lines=False,sep=";")
data_hosp = read_csvPandas(ad(hospCsv), error_bad_lines=False,sep=";")
data_hospAge = read_csvPandas(ad(hospAgeCsv), error_bad_lines=False,sep=";")
data_hospEtabl = read_csvPandas(ad(hospEtablCsv), error_bad_lines=False,sep=";")
data_weeklyLab = read_csvPandas(ad(weeklyLabCsv), error_bad_lines=False,sep=";")
data_dailyLab = read_csvPandas(ad(dailyLabCsv), error_bad_lines=False,sep=";")
meta_Hebdo = read_csvPandas(metaHebdoCsv, clearNaN=True, error_bad_lines=False,sep=";", header=2)
meta_QuotReg = read_csvPandas(metaQuotRegCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_QuotFra = read_csvPandas(metaQuotFraCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_Quot = read_csvPandas(metaQuotCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_HospServices = read_csvPandas(metaHospservices, clearNaN=True, error_bad_lines=False,sep=";")
meta_HospAge = read_csvPandas(metaHospAge, clearNaN=True, error_bad_lines=False,sep=";")
meta_HospIncid = read_csvPandas(metaHospIncid, clearNaN=True, error_bad_lines=False,sep=";")
meta_Hosp = read_csvPandas(metaHosp, clearNaN=True, error_bad_lines=False,sep=";")
meta_Sexe = read_csvPandas(metaSexeCsv, clearNaN=True, error_bad_lines=False,sep=";",header=0)
meta_Regions = read_csvPandas(metaRegionsCsv, clearNaN=True, error_bad_lines=False,sep=",")
meta_Ages = read_csvPandas(metaTranchesAgeCsv, clearNaN=True, error_bad_lines=False,sep=";")
# ## Figure out data characteristics
# In[ ]:
def showBasics(data,dataName):
print(f"{dataName:24}\thas shape {data.shape}")
dataListDescr = ((data_dailyRegion, "data_dailyRegion"),
(data_dailyDep,"data_dailyDep"),
(data_hospAge,"data_hospAge"),
(data_dailyFrance, "data_dailyFrance"),
(data_daily,"data_daily"),
(data_weekly , "data_weekly "),
(data_hospNouveau,"data_hospNouveau"),
(data_hosp,"data_hosp"),
(data_hospAge,"data_hospAge"),
(data_hospEtabl,"data_hospEtabl"),
(data_weeklyLab,"data_weeklyLab"),
(data_dailyLab ,"data_dailyLab"),
(meta_Hebdo,"meta_Hebdo"),
(meta_QuotReg,"meta_QuotReg"),
(meta_QuotFra,"meta_QuotFra"),
(meta_Quot,"meta_Quot"),
(meta_HospServices,"meta_HospServices"),
(meta_HospAge,"meta_HospAge"),
(meta_HospIncid,"meta_HospIncid"),
(meta_Hosp,"meta_Hosp"),
(meta_Sexe,"meta_Sexe"),
(meta_Regions,'meta_Regions'),
(meta_Ages,'meta_Ages'))
for (dat,name) in dataListDescr:
showBasics(dat,name)
# ### Help with meta data
# Of course I encountered some surprises, see `checkRepresentedRegions` issue with unknown codes which
# did occur in some files!
# In[ ]:
def checkRepresentedRegions(df,col='reg',**kwOpts):
"list regions represented in a dataframe, if kwd print=True, will print list of code->string"
regs = set(df[col])
if "print" in kwOpts:
for r in regs:
extract = meta_Regions[ meta_Regions['code_region'] == r]
# print (f"r={r}\t{extract}\t{extract.shape}")
if extract.shape[0] == 0:
lib = f"**Unknown:{r}**"
else:
lib=extract.iloc[0]. at ['nom_region']
print(f"Region: code={r}\t->{lib}")
return regs
# In[ ]:
for (dat,name) in dataListDescr:
if name[0:5]=="meta_": continue
print(f"\nDescription of data in '{name}'\n")
display(dat.describe().transpose())
# In[ ]:
for (dat,name) in dataListDescr:
if name[0:5]!="meta_": continue
print(f"\nMeta data in '{name}'\n")
display(dat)
# ## Read the meta data characterising resources on the remote site
# This is a demo of the capabilities of class `manageAndCacheDataFile`.
# In[ ]:
dataFileVMgr.pprintDataItem( item=".*org.*/^(name|class)$")
dataFileVMgr.pprintDataItem( item="resource.*/(f.*|title.*)")
# ## Get some demographics data from INSEE
# For the time being, these data are obtained / loaded from Insee web site using a manual process and are placed in a different directory, therefore a distinct FileManager is used, and loading this data is done here; for more details see the notebook `Pop-Data-FromGouv.ipy`
#
# Using the base version which does not try to update the "../dataPop" directory
# In[ ]:
dataFileVMgrInsee = manageDataFileVersions("../dataPop")
inseeDepXLS ="../dataPop/InseeDep.xls"
inseeDep = read_xlsxPandas(inseeDepXLS, sheet_name=1, header=7)
inseeReg = read_xlsxPandas(inseeDepXLS, sheet_name=0, header=7)
# Now we can display our demographics data (summarized)
# In[ ]:
display(inseeDep.iloc[:,4:].sum())
display(inseeReg.iloc[:,4:].sum())
# ## Let's do some graphics!
# ### Hospital emergency department and SOS Médecins data
# Df: dailyRegion ( file sursaud-covid19-quotidien)
# #### Structure the data
# Select age category '0', thus getting all ages
# In[ ]:
def select_Ages(df, ageGroup='0'):
return df.loc[df['sursaud_cl_age_corona'] == ageGroup]
def select_AllAges(df):
return select_Ages(df)
# In[ ]:
def groupByDate(df):
return df.groupby('date_de_passage')
# First, I work with the dailyRegion data, summing up for all regions.
# In[ ]:
gr_all_age_regions = groupByDate(select_AllAges(data_dailyRegion)).sum()
checkRepresentedRegions(data_dailyRegion, print=True);
# In[ ]:
dfGr = PAN.DataFrame(gr_all_age_regions.copy(), columns=gr_all_age_regions.columns[1:])
painter = figureTSFromFrame(dfGr,figsize=(12,8))
painter.doPlot()
painter.setAttrs(label=f"Days since {painter.dt[0]}",
title="Whole France/Data ER + SOS-medecin\nAll age groups",
legend=True,
xlabel=f"Days since {painter.dt[0]}")
PAN.set_option('display.max_colwidth', None)
display(meta_QuotReg[[ "Colonne","Description_FR" ]])
ImgMgr.save_fig("FIG002")
# Then, I look at the national data, as represented in `data_dailyFrance` and `data_daily`
# In[ ]:
print(f"data_daily: {data_daily.shape}")
print(f"{','.join(data_daily.columns)}")
display(data_daily.describe())
display(data_daily[:5])
print("data_dailyFrance: {data_dailyFrance.shape}")
print(f"{','.join(data_dailyFrance.columns)}")
display(data_dailyFrance.describe())
display(data_dailyFrance[:5])
# ### Hospital data
# DF: hospNouveau File: donnees-hospitalieres-nouveaux-covid19
# In[ ]:
gr_all_data_hospNouveau=data_hospNouveau.groupby('jour').sum()
dfGrHN = PAN.DataFrame(gr_all_data_hospNouveau)
colOpts = {'incid_dc': {"c":"b","marker":"v"},
'incid_rea': {"c":"r","marker":"o", "linestyle":"--"},
'incid_rad': {"marker":"+"},
'incid_hosp': {"marker":"*"}
}
painter = figureTSFromFrame(dfGrHN)
painter.doPlot()
painter.setAttrs(colOpts=colOpts,
xlabel=f"Days since {painter.dt[0]}",
title="Whole France (Hospital)\nDaily variation in patient status",
legend=True )
PAN.set_option('display.max_colwidth', None)
display(meta_HospIncid[[ "Colonne","Description_EN" ]])
# In[ ]:
gr_all_data_hosp=data_hosp.loc[data_hosp["sexe"] == 0 ].groupby('jour').sum()
cols = [ c for c in gr_all_data_hosp.columns if c != 'sexe']
dfGrH = PAN.DataFrame(gr_all_data_hosp[cols])
colOpts = { 'dc': {"c":"b","marker":"v"},
'rea': {"c":"r","marker":"o", "linestyle":"--"},
'rad': {"marker":"+"},
'hosp': {"marker":"*"}
}
painter = figureTSFromFrame(dfGrH)
painter.doPlot()
painter.setAttrs( colOpts=colOpts,
xlabel=f"Days since {painter.dt[0]}",
title="Whole France / Hospital\n:Daily patient status (ICU,Hosp) / Accumulated (discharged, dead)",
legend=True)
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG003")
# ### Now analyze hospital data according to sex
# In[ ]:
data_hosp_DepSex=data_hosp.set_index(["dep","sexe"])
data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)!=0]
d1 = data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)==1]
d2 = data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)==2]
d1s=d1.groupby("jour").sum()
d2s=d2.groupby("jour").sum()
dm= PAN.concat([d1s,d2s], axis=1)
cols1 = list(map (lambda x: x+"_M", d1s.columns))
cols2 = list(map (lambda x: x+"_F", d2s.columns))
dm.columns = (*cols1,*cols2)
# In[ ]:
painter = figureTSFromFrame(dm)
colOpts = {'dc_F': {"c":"r", "marker":"v"},
'dc_M': {"c":"b", "marker":"v"},
'rea_F': {"c":"r", "marker":"o", "linestyle":"--"},
'rea_M': {"c":"b", "marker":"o", "linestyle":"--"},
'rad_F': {"c":"k", "marker":"+"},
'rad_M': {"c":"y", "marker":"+"},
'hosp_M':{"c":"b"},
'HospConv_M':{'c':'c'},
'SSR_USLD_M' :{'c':'c',"marker":'p' },
'hosp_F':{'c':'r'},
'HospConv_F':{'c':'m'},
'SSR_USLD_F' :{'c':'m',"marker":'p'},
'autres_M':{'c':'c', "linestyle":":"},
'autres_F':{'c':'m', "linestyle":":"}
}
painter.doPlotBycol()
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title="Whole France\ / Hospital\n Male / Female\n:Daily patient status (ICU,Hosp) / Accumulated (discharged, dead)",
legend=True )
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG004")
# ### Now analyze hospital data according to age
# For now the data available in table `data_hospAge` covers a small number of days... hopefully this will improve, either by earlier data becoming available, or simply by more data being collected day after day!
# In[ ]:
data_hosp_RegAge=data_hospAge.set_index(["reg","jour",'cl_age90'])
ddd= data_hosp_RegAge[ data_hosp_RegAge.index.get_level_values(2)!=0 ]
# We may have multiple entries for the same day; this is an issue in the way
# this table is made up. For now, it seems the best strategy is to sum!
# We keep the previous strategy for reference, which was to arbitrarily select a value
# among duplicate indices; hence the `if True` below.
if True:
dhRA = ddd.groupby(by=list(ddd.index.names)).sum().copy()
dhRAg = dhRA.unstack('cl_age90').groupby("jour").sum()
else:
    # older strategy, kept for reference; the `keep` parameter has several possible values
# remove duplicate entries, not performing selection between multiple values
duplic = ~ddd.duplicated(keep=False)
print( f"Number of duplicated lines: {duplic.sum()} {duplic.sum()/duplic.size*100:.2f}%")
dhRA = ddd[ duplic ].unstack('cl_age90')
dhRAg = dhRA.groupby("jour").sum()
# In[ ]:
ageClasses = sorted(set(dhRAg.columns.get_level_values(1)))
print(f"age classes = {ageClasses}")
levCat = sorted(set(dhRAg.columns.get_level_values(0)))
levAge = sorted(set(dhRAg.columns.get_level_values(1)))
subnodeSpec=(lambda i,j:{"nrows":i,"ncols":j})(*subPlotShape(len(levAge),maxCol=6))
print(f"nb age classes:{len(levAge)}\tsubnodeSpec:{subnodeSpec}")
if len(levAge) != len(ageClasses):
raise RuntimeError("Inconsistent values for number of age classes")
# In[ ]:
colOpts = {'dc': {"c":"b","marker":"v"},
'rea': {"c":"b","marker":"o", "linestyle":"--"},
'rad': {"c":"r", "marker":"+"},
'hosp':{"c":"k", "linestyle":"-"},
'HospConv':{"c":"c", "linestyle":"-"},
'SSR_USLD' :{"c":"g","linestyle":"-"},
'autres':{'c':'m'}
}
# In[ ]:
painter = figureTSFromFrame(None, subplots=subnodeSpec, figsize=(15,15))
for i in range(len(levAge)):
cat = ageClasses[i]
if cat < 90:
title = f"Age {cat-9}-{cat}"
else:
title = "Age 90+"
dfExtract = dhRAg.loc(axis=1)[:,cat]
# remove the now redundant information labeled 'cl_age90'
dfExtract.columns = dfExtract.columns.levels[0]
painter.doPlotBycol(dfExtract);
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title = title,
legend = True )
painter.advancePlotIndex()
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG005")
# ## Testing : Laboratory data
#
# This concerns testing (I have not found the meta data yet, but column labels are clear enough).
# The `data_dailyLab` data is split between age classes and departements.
# In[ ]:
todayStr = datetime.date.today().isoformat() # handle a data error that appeared on 5/5/2020
# Modif. August 2021:
# - we do not have the comfort of cat 0 summing all ages anymore
#  - maybe it would be better to divide by population... this would give a different figure
# - should we rename columns with the old labels?
# In[ ]:
msk = data_dailyLab.loc[:, "jour"] <= todayStr  # there is an error in the version of the data distributed on 05/05/2020
dl=data_dailyLab.loc[msk,:]
dlGrA = dl.groupby('jour').sum()
#dlGr["cl_age90"]=0 #
dlGr = dlGrA.drop(columns=["cl_age90", "pop"])
# In[ ]:
dlGr.columns
# In[ ]:
def LaboRelabCols(tble):
corresp={
'P_f' : 'Pos_f', 'P_h': 'Pos_h', 'P':'Positive',
'T_f':'Tested_f', 'T_h':'Tested_h', 'T':'Tested'
}
rc = [corresp.get(x) for x in tble.columns]
tble.columns=rc
# In[ ]:
LaboRelabCols(dlGr)
# In[ ]:
painter = figureTSFromFrame(dlGr)
colOpts = {'Tested': {"c":"b", "marker":"*"},
'Positive': {"c":"r", "marker":"+"},
'Tested_h': {"c":"b","marker":"o", "linestyle":"--"},
'Tested_f': {"c":"g","marker":"o", "linestyle":"--"},
'Pos_h': {"c":"b", "marker":"+"},
'Pos_f': {"c":"g", "marker":"+"}
}
painter.doPlotBycol()
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title="Whole France laboratory: tested, positive for male(h) and female(f)",
legend=True )
ImgMgr.save_fig("FIG006")
# Analyze laboratory data according to age
# In[ ]:
data_dailyLab.columns
dataDLab = data_dailyLab.loc[msk,:].copy()
# In[ ]:
dataDLab
# In[ ]:
dhRA
# In[ ]:
dataDLab=dataDLab.set_index(["jour",'cl_age90'])
# In[ ]:
dhRA = dataDLab.drop(columns=["pop"]).unstack('cl_age90')
dhRAg = dhRA.groupby("jour").sum()
# In[ ]:
ageClasses = sorted(set(dhRAg.columns.get_level_values(1)))
print(f"age classes = {ageClasses}")
levCat = sorted(set(dhRA.columns.get_level_values(0)))
levAge = sorted(set(dhRA.columns.get_level_values(1)))
subnodeSpec=(lambda i,j:{"nrows":i,"ncols":j})(*subPlotShape(len(levAge),maxCol=6))
print(f"nb age classes:{len(levAge)}\tsubnodeSpec:{subnodeSpec}")
if len(levAge) != len(ageClasses):
raise RuntimeError("Inconsistent values for number of age classes")
ageLabs=['All']+[f"{x-9}-{x}" for x in ageClasses[1:-1]]+["90+"]
# In[ ]:
colOpts = {'Tested': {"c":"b", "marker":"*"},
'Positive': {"c":"r", "marker":"+"},
'Tested_h': {"c":"b","marker":"o", "linestyle":"--"},
'Tested_f': {"c":"g","marker":"o", "linestyle":"--"},
'Pos_h': {"c":"b", "marker":"+"},
'Pos_f': {"c":"g", "marker":"+"}
}
# In[ ]:
painter = figureTSFromFrame(None, subplots=subnodeSpec, figsize=(15,15))
for i in range(len(ageClasses)):
cat = ageLabs[i]
ageSpec = ageClasses[i]
title = f"Labo Tests\nAge: {cat}"
dfExtract = dhRAg.loc(axis=1)[:,ageSpec]
# remove the not needed information since we selected by ageSpec
dfExtract.columns = [col[0] for col in dfExtract.columns]
LaboRelabCols(dfExtract)
painter.doPlotBycol(dfExtract);
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title = title,
legend = True )
painter.advancePlotIndex()
ImgMgr.save_fig("FIG007")
# # Merge COVID and demographics data
# See the `Pop-Data-FromGouv.ipynb` notebook for more details on the demographics data obtained from
# INSEE (https://www.insee.fr/fr/accueil).
# Prepare the data for a database style join/merge, documented on https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html.
# First we need to establish "dep" as an index in hospital data:
# In[ ]:
hndDf = data_hospNouveau.copy()
hndDf.set_index("dep");
# Then we extract the demographic information and set index "dep"
# In[ ]:
depStats = inseeDep.iloc[:,[2,3,7,8]].copy()
cols = depStats.columns.values
cols[0]="dep"
depStats.columns = cols
depStats.set_index("dep");
# Now we perform the merge, and group by date and 'départements':
# In[ ]:
hndMerged = PAN.merge(hndDf,depStats, on="dep" )
hndGrMerged=hndMerged.groupby(["jour","dep"]).sum()
# For now, look at daily statistics normalized by concerned population (unit= event per million people)
# In[ ]:
hndGMJour = hndGrMerged.groupby("jour").sum()
colLabs = ("incid_hosp", "incid_rea", "incid_dc", "incid_rad")
for lab in colLabs:
hndGMJour[lab+"_rate"] = hndGMJour[lab]/hndGMJour["Population totale"]*1.0e6
# And the graph can be readily generated:
# In[ ]:
ncolLabs = list ( x+"_rate" for x in colLabs)
df=hndGMJour.loc[:,ncolLabs]
colOpts = {'incid_dc_rate' : {"c":"b","marker":"v"},
'incid_rea_rate' : {"c":"r","marker":"o", "linestyle":"--"},
'incid_rad_rate' : {"marker":"+"},
'incid_hosp_rate': {"marker":"*"}
}
painter = figureTSFromFrame(df)
painter.doPlot()
painter.setAttrs(colOpts=colOpts,
xlabel=f"Days since {painter.dt[0]}",
ylabel="Events per million people",
title="Whole France (Hospital)\nDaily variation in patient status",
legend=True )
PAN.set_option('display.max_colwidth', None)
import io
import os
from datetime import datetime
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.api.quote import get_security_list
from fooltrader.contract.data_contract import KDATA_COLUMN_STOCK, KDATA_COLUMN_163
from fooltrader.contract.files_contract import get_kdata_path
from fooltrader.utils import utils
class FutureShfeSpider(scrapy.Spider):
name = "future_shfe_spider"
custom_settings = {
# 'DOWNLOAD_DELAY': 2,
# 'CONCURRENT_REQUESTS_PER_DOMAIN': 8,
'SPIDER_MIDDLEWARES': {
'fooltrader.middlewares.FoolErrorMiddleware': 1000,
}
}
    # If start/end dates are specified, the request is used to fetch incremental data
def yield_request(self, item, start_date=None, end_date=None):
data_path = get_kdata_path(item, source='163')
if start_date:
start = start_date.strftime('%Y%m%d')
else:
start = item['listDate'].replace('-', '')
if end_date:
end = end_date.strftime('%Y%m%d')
else:
end = datetime.today().strftime('%Y%m%d')
if not os.path.exists(data_path) or start_date or end_date:
if item['exchange'] == 'sh':
exchange_flag = 0
else:
exchange_flag = 1
url = self.get_k_data_url(exchange_flag, item['code'], start, end)
yield Request(url=url, meta={'path': data_path, 'item': item},
callback=self.download_day_k_data)
def start_requests(self):
item = self.settings.get("security_item")
start_date = self.settings.get("start_date")
end_date = self.settings.get("end_date")
if item is not None:
for request in self.yield_request(item, start_date, end_date):
yield request
else:
for _, item in get_security_list().iterrows():
for request in self.yield_request(item):
yield request
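    # Hedged usage note: once this spider is registered in a Scrapy project it would
    # typically be launched with "scrapy crawl future_shfe_spider" (the `name` attribute
    # above); `security_item`, `start_date` and `end_date` can be injected through the
    # crawler settings, as read in start_requests().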
def download_day_k_data(self, response):
path = response.meta['path']
item = response.meta['item']
try:
            # CSV data already saved to disk
if os.path.exists(path):
df_current = pd.read_csv(path, dtype=str)
else:
df_current = pd.DataFrame()
df = utils.read_csv(io.BytesIO(response.body), encoding='GB2312', na_values='None')
df['code'] = item['code']
df['securityId'] = item['id']
df = df.loc[:,
['日期', 'code', '最低价', '开盘价', '收盘价', '最高价', '成交量', '成交金额', 'securityId', '前收盘', '涨跌额', '涨跌幅', '换手率',
'总市值', '流通市值']]
df['factor'] = None
df.columns = KDATA_COLUMN_STOCK
            # Merge into the current CSV
df_current = df_current.append(df, ignore_index=True)
df_current = df_current.dropna(subset=KDATA_COLUMN_163)
df_current = df_current.drop_duplicates(subset='timestamp', keep='last')
df_current = df_current.set_index(df_current['timestamp'])
            df_current.index = pd.to_datetime(df_current.index)
import datetime as dt
import itertools
import json
import logging
import re
from functools import cached_property
from itertools import product
from typing import Callable, List, Mapping, Optional, Sequence, Union
import numpy as np
import pandas as pd
import tushare as ts
from ratelimiter import RateLimiter
from retrying import retry
from tqdm import tqdm
from .data_source import DataSource
from .. import config, constants, date_utils, utils
from ..database_interface import DBInterface
from ..tickers import FundTickers, FundWithStocksTickers, StockFundTickers, StockTickers
START_DATE = {
'common': dt.datetime(1990, 1, 1),
'shibor': dt.datetime(2006, 10, 8),
    'ggt': dt.datetime(2016, 6, 29),  # Hong Kong Stock Connect (港股通)
'hk_cal': dt.datetime(1980, 1, 1),
'hk_daily': dt.datetime(1990, 1, 2),
'fund_daily': dt.datetime(1998, 4, 6),
'index_daily': dt.datetime(2008, 1, 1),
'index_weight': dt.datetime(2005, 1, 1)
}
class TushareData(DataSource):
def __init__(self, tushare_token: str = None, db_interface: DBInterface = None, param_json_loc: str = None) -> None:
"""
        Tushare to Database. Writes data downloaded from tushare into the database.
        :param tushare_token: tushare token
        :param db_interface: DBInterface
        :param param_json_loc: column-name information for the DataFrames returned by tushare
"""
if tushare_token is None:
tushare_token = config.get_global_config()['tushare']['token']
db_interface = config.get_db_interface()
super().__init__(db_interface)
self.token = tushare_token
self._pro = None
self._factor_param = utils.load_param('tushare_param.json', param_json_loc)
def login(self):
self._pro = ts.pro_api(self.token)
def logout(self):
self._pro = ts.pro_api('')
def init_db(self):
"""Initialize database data. They cannot be achieved by naive ``update_*`` function"""
self.init_hk_calendar()
self.init_stock_names()
self.init_accounting_data()
fund_tickers = FundTickers(self.db_interface).all_ticker()
self.update_fund_portfolio(fund_tickers)
def update_base_info(self):
"""Update calendar and ticker lists"""
self.update_calendar()
self.update_hk_calendar()
self.update_stock_list_date()
self.update_convertible_bond_list_date()
# self.update_fund_list_date()
self.update_future_list_date()
self.update_option_list_date()
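    # Hedged usage sketch (the token value is an assumption):
    #
    #     td = TushareData(tushare_token='<your tushare token>')
    #     td.login()
    #     td.update_base_info()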
#######################################
# init func
#######################################
def init_hk_calendar(self) -> None:
""" 更新港交所交易日历 """
table_name = '港股交易日历'
if self.db_interface.get_latest_timestamp(table_name):
df = self._pro.hk_tradecal(is_open=1)
else:
storage = []
end_dates = ['19850101', '19900101', '19950101', '20000101', '20050101', '20100101', '20150101', '20200101']
for end_date in end_dates:
storage.append(self._pro.hk_tradecal(is_open=1, end_date=end_date))
storage.append(self._pro.hk_tradecal(is_open=1))
df = pd.concat(storage, ignore_index=True).drop_duplicates()
cal_date = df.cal_date
cal_date = cal_date.sort_values()
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
self.db_interface.update_df(cal_date, table_name)
def init_stock_names(self):
"""获取所有股票的曾用名"""
raw_df = self.update_stock_names()
raw_df_start_dates = raw_df.index.get_level_values('DateTime').min()
uncovered_stocks = self.stock_tickers.ticker(raw_df_start_dates)
with tqdm(uncovered_stocks) as pbar:
for stock in uncovered_stocks:
pbar.set_description(f'下载{stock}的股票名称')
self.update_stock_names(stock)
pbar.update()
logging.getLogger(__name__).info('股票曾用名下载完成.')
@cached_property
def stock_tickers(self) -> StockTickers:
return StockTickers(self.db_interface)
#######################################
# listing funcs
#######################################
def update_calendar(self) -> None:
""" 更新上交所交易日历 """
table_name = '交易日历'
df = self._pro.trade_cal(is_open=1)
cal_date = df.cal_date
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
self.db_interface.purge_table(table_name)
self.db_interface.insert_df(cal_date, table_name)
def update_hk_calendar(self) -> None:
""" 更新港交所交易日历 """
table_name = '港股交易日历'
df = self._pro.hk_tradecal(is_open=1)
cal_date = df.cal_date
cal_date = cal_date.sort_values()
cal_date.name = '交易日期'
cal_date = cal_date.map(date_utils.date_type2datetime)
cal_date.index.name = 'index'
db_data = self.db_interface.read_table(table_name)
db_data = db_data.loc[db_data['交易日期'] < cal_date.min(), :]
data = pd.concat([db_data.set_index('index').iloc[:, 0], cal_date], ignore_index=True)
self.db_interface.purge_table(table_name)
self.db_interface.insert_df(data, table_name)
def update_stock_list_date(self) -> None:
""" 更新所有股票列表, 包括上市, 退市和暂停上市的股票
ref: https://tushare.pro/document/2?doc_id=25
"""
data_category = '股票列表'
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
list_status = ['L', 'D', 'P']
fields = ['ts_code', 'list_date', 'delist_date']
for status in list_status:
storage.append(self._pro.stock_basic(exchange='', list_status=status, fields=fields))
output = pd.concat(storage)
output['证券类型'] = 'A股股票'
list_info = self._format_list_date(output.loc[:, ['ts_code', 'list_date', 'delist_date', '证券类型']])
self.db_interface.update_df(list_info, '证券代码')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
# TODO
def get_hk_stock_list_date(self):
""" 更新所有港股股票列表, 包括上市, 退市和暂停上市的股票
ref: https://tushare.pro/document/2?doc_id=25
"""
data_category = '股票列表'
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
list_status = ['L', 'D']
for status in list_status:
storage.append(self._pro.hk_basic(list_status=status))
output = pd.concat(storage)
output['证券类型'] = '港股股票'
list_info = self._format_list_date(output.loc[:, ['ts_code', 'list_date', 'delist_date', '证券类型']])
self.db_interface.update_df(list_info, '证券代码')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
def update_convertible_bond_list_date(self) -> None:
""" 更新可转债信息
ref: https://tushare.pro/document/2?doc_id=185
"""
data_category = '可转债基本信息'
desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
output = self._pro.cb_basic(fields=list(desc.keys()))
# list date
list_info = output.loc[:, ['ts_code', 'list_date', 'delist_date']]
list_info['证券类型'] = '可转债'
list_info = self._format_list_date(list_info, extend_delist_date=True)
self.db_interface.update_df(list_info, '证券代码')
# names
name_info = output.loc[:, ['list_date', 'ts_code', 'bond_short_name']].rename({'list_date': 'DateTime'},
axis=1).dropna()
name_info = self._standardize_df(name_info, desc)
self.db_interface.update_df(name_info, '证券名称')
# info
output = self._standardize_df(output, desc)
self.db_interface.update_df(output, '可转债列表')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
def update_future_list_date(self) -> None:
""" 更新期货合约
ref: https://tushare.pro/document/2?doc_id=135
"""
data_category = '期货合约信息表'
desc = self._factor_param[data_category]['输出参数']
def find_start_num(a):
g = re.match(r'[\d.]*', a)
return float(g.group(0))
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
for exchange in constants.FUTURE_EXCHANGES:
storage.append(self._pro.fut_basic(exchange=exchange, fields=list(desc.keys()) + ['per_unit']))
output = pd.concat(storage, ignore_index=True)
output.ts_code = self.format_ticker(output['ts_code'].tolist())
output.multiplier = output.multiplier.where(output.multiplier.notna(), output.per_unit)
output = output.dropna(subset=['multiplier']).drop('per_unit', axis=1)
output.quote_unit_desc = output.quote_unit_desc.apply(find_start_num)
# exclude XINE's TAS contracts
output = output.loc[~output.symbol.str.endswith('TAS'), :]
# drop AP2107.CZC
output = output.loc[output.symbol != 'AP107', :]
db_ids = self.db_interface.get_all_id('证券代码')
output = output.loc[~output['ts_code'].isin(db_ids), :]
# list date
list_info = output.loc[:, ['ts_code', 'list_date', 'delist_date']]
list_info['证券类型'] = '期货'
list_info = self._format_list_date(list_info, extend_delist_date=True)
self.db_interface.insert_df(list_info, '证券代码')
# names
name_info = output.loc[:, ['list_date', 'ts_code', 'name']].rename({'list_date': 'DateTime'}, axis=1)
name_info = self._standardize_df(name_info, desc)
self.db_interface.update_df(name_info, '证券名称')
# info
output = self._standardize_df(output, desc)
self.db_interface.insert_df(output, '期货合约')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
def update_option_list_date(self) -> None:
""" 更新期权合约
ref: https://tushare.pro/document/2?doc_id=158
"""
data_category = '期权合约信息'
desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
for exchange in constants.FUTURE_EXCHANGES + constants.STOCK_EXCHANGES:
storage.append(self._pro.opt_basic(exchange=exchange, fields=list(desc.keys())))
output = pd.concat(storage)
output.opt_code = output.opt_code.str.replace('OP$', '', regex=True)
output.opt_code = self.format_ticker(output['opt_code'].tolist())
output.ts_code = self.format_ticker(output['ts_code'].tolist())
db_ids = self.db_interface.get_all_id('证券代码')
output = output.loc[~output['ts_code'].isin(db_ids), :]
# list date
list_info = output.loc[:, ['ts_code', 'list_date', 'delist_date', 'opt_type']]
list_info = self._format_list_date(list_info, extend_delist_date=True)
self.db_interface.insert_df(list_info, '证券代码')
# names
name_info = output.loc[:, ['list_date', 'ts_code', 'name']].rename({'list_date': 'DateTime'}, axis=1)
name_info = self._standardize_df(name_info, desc)
self.db_interface.insert_df(name_info, '证券名称')
# info
info = self._standardize_df(output, desc)
self.db_interface.insert_df(info, '期权合约')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
def update_fund_list_date(self) -> None:
""" 更新基金列表
ref: https://tushare.pro/document/2?doc_id=19
"""
data_category = '公募基金列表'
desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
for market, status in itertools.product(['E', 'O'], ['D', 'I', 'L']):
storage.append(self._pro.fund_basic(market=market, status=status, fields=list(desc.keys())))
output = pd.concat(storage)
output = output.loc[self.filter_valid_cn_equity_ticker(output.ts_code), :]
etf_type = ['ETF' if it.endswith('ETF') else '' for it in output['name']]
openness = ['' if it == '契约型开放式' else '封闭' for it in output['type']]
exchange_type = ['' if it == 'E' else '场外' for it in output['market']]
end_type = '基金'
output.fund_type = output.fund_type + etf_type + openness + exchange_type + end_type
# list date
exchange_part = output.loc[output.market == 'E', :]
listed1 = exchange_part.loc[:, ['ts_code', 'list_date', 'delist_date', 'fund_type']]
list_info1 = self._format_list_date(listed1, extend_delist_date=True)
otc_part = output.loc[output.market == 'O', :]
listed2 = otc_part.loc[:, ['ts_code', 'found_date', 'due_date', 'fund_type']]
list_info2 = self._format_list_date(listed2, extend_delist_date=True)
list_info = pd.concat([list_info1, list_info2])
self.db_interface.update_df(list_info, '证券代码')
# names
exchange_name = exchange_part.loc[:, ['ts_code', 'list_date', 'name']]
otc_name = otc_part.loc[:, ['ts_code', 'found_date', 'name']].rename({'found_date': 'list_date'}, axis=1)
name_info = pd.concat([exchange_name, otc_name]).dropna()
name_info.columns = ['ID', 'DateTime', '证券名称']
name_info.DateTime = date_utils.date_type2datetime(name_info['DateTime'].tolist())
name_info = name_info.set_index(['DateTime', 'ID'])
self.db_interface.update_df(name_info, '证券名称')
# info
output = output.drop(['type', 'market'], axis=1)
content = self._standardize_df(output, desc)
self.db_interface.purge_table('基金列表')
self.db_interface.insert_df(content, '基金列表')
logging.getLogger(__name__).info(f'{data_category}下载完成.')
#######################################
# interest funcs
#######################################
@date_utils.strlize_input_dates
def get_shibor(self, start_date: date_utils.DateType = None, end_date: date_utils.DateType = None) -> pd.DataFrame:
""" Shibor利率数据 """
data_category = 'Shibor利率数据'
desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
df = self._pro.shibor(start_date=start_date, end_date=end_date)
df = self._standardize_df(df, desc)
self.db_interface.update_df(df, data_category)
logging.getLogger(__name__).info(f'{data_category}下载完成.')
return df
#######################################
# stock funcs
#######################################
def get_company_info(self) -> pd.DataFrame:
"""
Fetch basic information on listed companies.
:ref: https://tushare.pro/document/2?doc_id=112
:return: DataFrame with basic information on listed companies
"""
data_category = '上市公司基本信息'
column_desc = self._factor_param[data_category]['输出参数']
fields = list(column_desc.keys())
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
for exchange in constants.STOCK_EXCHANGES:
storage.append(self._pro.stock_company(exchange=exchange, fields=fields))
df = pd.concat(storage)
df = self._standardize_df(df, column_desc)
self.db_interface.update_df(df, data_category)
logging.getLogger(__name__).info(f'{data_category}下载完成.')
return df
@date_utils.strlize_input_dates
def get_ipo_info(self, start_date: date_utils.DateType = None) -> pd.DataFrame:
""" IPO新股列表 """
data_category = 'IPO新股列表'
column_desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
df = self._pro.new_share(start_date=start_date)
df[['amount', 'market_amount', 'limit_amount']] = df[['amount', 'market_amount', 'limit_amount']] * 10000
df['funds'] = df['funds'] * 100000000
# list_date
list_date_data = df.loc[df.issue_date != '', ['issue_date', 'ts_code']]
list_date_data['证券类型'] = 'A股股票'
list_date_data['上市状态'] = True
list_date_data = self._standardize_df(list_date_data, {'issue_date': 'DateTime', 'ts_code': 'ID'})
list_date_data = list_date_data.loc[list_date_data.index.get_level_values('DateTime') < dt.datetime.now(), :]
self.db_interface.update_df(list_date_data, '证券代码')
# info
df = self._standardize_df(df, column_desc)
self.db_interface.update_df(df, data_category)
logging.getLogger(__name__).info(f'{data_category}下载完成.')
return df
def update_stock_names(self, ticker: str = None) -> pd.DataFrame:
"""更新曾用名
ref: https://tushare.pro/document/2?doc_id=100
:param ticker: 证券代码(000001.SZ)
"""
data_category = '证券名称'
column_desc = self._factor_param[data_category]['输出参数']
fields = list(column_desc.keys())
logging.getLogger(__name__).debug(f'开始下载{ticker if ticker else ""}{data_category}.')
df = self._pro.namechange(ts_code=ticker, fields=fields)
df = self._standardize_df(df, column_desc)
self.db_interface.update_df(df, data_category)
logging.getLogger(__name__).debug(f'{ticker if ticker else ""}{data_category}下载完成.')
return df
def get_daily_hq(self, trade_date: date_utils.DateType = None,
start_date: date_utils.DateType = None, end_date: date_utils.DateType = None) -> None:
"""更新每日行情
行情信息包括: 开高低收, 量额, 复权因子, 股本
:param trade_date: 交易日期
:param start_date: 开始日期
:param end_date: 结束日期
交易日期查询一天, 开始结束日期查询区间. 二选一
:return: None
"""
if (not trade_date) & (not start_date):
raise ValueError('trade_date 和 start_date 必填一个!')
if end_date is None:
end_date = dt.datetime.today()
dates = [trade_date] if trade_date else self.calendar.select_dates(start_date, end_date)
pre_date = self.calendar.offset(dates[0], -1)
output_fields = '输出参数'
price_desc = self._factor_param['日线行情'][output_fields]
price_fields = list(price_desc.keys())
adj_factor_desc = self._factor_param['复权因子'][output_fields]
indicator_desc = self._factor_param['每日指标'][output_fields]
indicator_fields = list(indicator_desc.keys())
# pre data:
def get_pre_data(tn: str) -> pd.Series:
return self.db_interface.read_table(tn, tn, end_date=pre_date).groupby('ID').tail(1)
pre_adj_factor = get_pre_data('复权因子')
pre_dict = {'total_share': get_pre_data('总股本'),
'float_share': get_pre_data('流通股本'),
'free_share': get_pre_data('自由流通股本')}
with tqdm(dates) as pbar:
for date in dates:
current_date_str = date_utils.date_type2str(date)
pbar.set_description(f'下载{current_date_str}的日行情')
# price data
df = self._pro.daily(trade_date=current_date_str, fields=price_fields)
df['vol'] = df['vol'] * 100
df['amount'] = df['amount'] * 1000
price_df = self._standardize_df(df, price_desc)
self.db_interface.update_df(price_df, '股票日行情')
# adj_factor data
df = self._pro.adj_factor(trade_date=current_date_str)
adj_df = self._standardize_df(df, adj_factor_desc)
self.db_interface.update_compact_df(adj_df, '复权因子', pre_adj_factor)
pre_adj_factor = adj_df
# indicator data
df = self._pro.daily_basic(trade_date=current_date_str, fields=indicator_fields)
df = self._standardize_df(df, indicator_desc).multiply(10000)
for key, value in pre_dict.items():
col_name = indicator_desc[key]
self.db_interface.update_compact_df(df[col_name], col_name, value)
pre_dict[key] = df[col_name]
pbar.update()
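# Illustrative call patterns for get_daily_hq (a sketch, not part of the original code;
# "downloader" stands for an already constructed TushareData-style instance):
#
#   downloader.get_daily_hq(trade_date='20210104')                          # a single trading day
#   downloader.get_daily_hq(start_date='20210104', end_date='20210108')     # a date range
#
# Passing neither trade_date nor start_date raises ValueError, as enforced above.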
def update_pause_stock_info(self):
"""更新股票停牌信息"""
table_name = '股票停牌'
renaming_dict = self._factor_param[table_name]['输出参数']
start_date = self.db_interface.get_latest_timestamp(table_name, dt.date(1990, 12, 10)) + dt.timedelta(days=1)
end_date = self.calendar.yesterday()
df = self._pro.suspend_d(start_date=date_utils.date_type2str(start_date),
end_date=date_utils.date_type2str(end_date),
suspend_type='S')
output = df.loc[pd.isna(df.suspend_timing), ['ts_code', 'trade_date']]
output['停牌类型'] = '停牌一天'
output['停牌原因'] = ''
output = self._standardize_df(output, renaming_dict)
self.db_interface.insert_df(output, table_name)
def get_all_dividend(self) -> None:
""" 获取上市公司分红送股信息 """
data_category = '分红送股'
column_desc = self._factor_param[data_category]['输出参数']
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
tickers = self.stock_tickers.all_ticker()
with tqdm(tickers) as pbar:
for stock in tickers:
pbar.set_description(f'下载{stock}的分红送股数据')
df = self._pro.dividend(ts_code=stock, fields=(list(column_desc.keys())))
df = df.loc[df['div_proc'] == '实施', :]
# Workaround for records that lack an announcement date
df['ann_date'].where(df['ann_date'].notnull(), df['imp_ann_date'], inplace=True)
df.drop(['div_proc', 'imp_ann_date'], axis=1, inplace=True)
df.dropna(subset=['ann_date'], inplace=True)
df = self._standardize_df(df, column_desc)
df = df.drop_duplicates()
try:
self.db_interface.insert_df(df, data_category)
except Exception:
print(f'请手动处理{stock}的分红数据')
pbar.update()
logging.getLogger(__name__).info(f'{data_category}信息下载完成.')
def update_dividend(self) -> None:
""" 更新上市公司分红送股信息 """
data_category = '分红送股'
column_desc = self._factor_param[data_category]['输出参数']
db_date = self.db_interface.get_column_max(data_category, '股权登记日')
dates_range = self.calendar.select_dates(db_date, dt.date.today(), inclusive=(False, True))
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
with tqdm(dates_range) as pbar:
for date in dates_range:
pbar.set_description(f'下载{date}的分红送股数据')
date_str = date_utils.date_type2str(date)
df = self._pro.dividend(record_date=date_str, fields=(list(column_desc.keys())))
df = df.loc[df['div_proc'] == '实施', :]
# Workaround for records that lack an announcement date
df['ann_date'].where(df['ann_date'].notnull(), df['imp_ann_date'], inplace=True)
df.drop(['div_proc', 'imp_ann_date'], axis=1, inplace=True)
df = self._standardize_df(df, column_desc)
self.db_interface.update_df(df, data_category)
pbar.update()
logging.getLogger(__name__).info(f'{data_category}信息下载完成.')
def get_financial(self, ticker: str) -> None:
""" 获取公司的 资产负债表, 现金流量表 和 利润表, 并写入数据库 """
balance_sheet = '资产负债表'
income = '利润表'
cash_flow = '现金流量表'
balance_sheet_desc = self._factor_param[balance_sheet]['输出参数']
income_desc = self._factor_param[income]['输出参数']
cash_flow_desc = self._factor_param[cash_flow]['输出参数']
company_type_desc = self._factor_param[balance_sheet]['公司类型']
combined_types = ['1', '4', '5', '11']
mother_types = ['6', '9', '10', '12']
def download_data(api_func: Callable, report_type_list: Sequence[str],
column_name_dict: Mapping[str, str], table_name: str) -> None:
storage = []
for i in report_type_list:
storage.append(api_func(ts_code=ticker, report_type=i, fields=list(column_name_dict.keys())))
df = pd.concat(storage, ignore_index=True)
if df.empty:  # 000508 has no data
return
df = df.dropna(subset=['ann_date', 'f_ann_date', 'end_date'])  # 000166 (Shenwan Hongyuan) early records lack timestamps
df = df.sort_values('update_flag').groupby(['ann_date', 'end_date', 'report_type']).tail(1)
df = df.drop('update_flag', axis=1).fillna(np.nan).replace(0, np.nan).dropna(how='all', axis=1)
df = df.set_index(['ann_date', 'f_ann_date', 'report_type']).sort_index().drop_duplicates(
keep='first').reset_index()
df = df.sort_values('report_type').drop(['ann_date', 'report_type'], axis=1)
df = df.replace({'comp_type': company_type_desc})
df = self._standardize_df(df, column_name_dict)
df = df.loc[~df.index.duplicated(), :].sort_index()
df = df.loc[df.index.get_level_values('报告期').month % 3 == 0, :]
df = self.append_report_date_cache(df)
self.db_interface.delete_id_records(table_name, ticker)
try:
self.db_interface.insert_df(df, table_name)
except:
logging.getLogger(__name__).error(f'{ticker} - {table_name} failed to get coherent data')
loop_vars = [(self._pro.income, income_desc, income),
(self._pro.cashflow, cash_flow_desc, cash_flow),
(self._pro.balancesheet, balance_sheet_desc, balance_sheet)]
for f, desc, table in loop_vars:
download_data(f, mother_types, desc, f'母公司{table}')
download_data(f, combined_types, desc, f'合并{table}')
def init_accounting_data(self):
tickers = self.stock_tickers.all_ticker()
db_ticker = self.db_interface.get_column_max('合并资产负债表', 'ID')
if db_ticker:
tickers = tickers[tickers.index(db_ticker):]
rate_limiter = RateLimiter(self._factor_param['资产负债表']['每分钟限速'] / 8, 60)
logging.getLogger(__name__).debug('开始下载财报.')
with tqdm(tickers) as pbar:
for ticker in tickers:
with rate_limiter:
pbar.set_description(f'下载 {ticker} 的财务数据')
self.get_financial(ticker)
pbar.update()
logging.getLogger(__name__).info('财报下载完成')
@retry(stop_max_attempt_number=3)
def update_financial_data(self, date: dt.datetime = None):
table_name = '财报披露计划'
desc = self._factor_param[table_name]['输出参数']
ref_table = '合并资产负债表'
db_data = self.db_interface.read_table(ref_table, '期末总股本')
latest = db_data.groupby('ID').tail(1).reset_index().loc[:, ['DateTime', 'ID']].rename({'ID': 'ts_code'},
axis=1)
update_tickers = set(self.stock_tickers.all_ticker()) - set(latest.ts_code.tolist())
report_dates = date_utils.ReportingDate.get_latest_report_date(date)
for report_date in report_dates:
df = self._pro.disclosure_date(end_date=date_utils.date_type2str(report_date), fields=list(desc.keys()))
df.actual_date = df.actual_date.apply(date_utils.date_type2datetime)
tmp = df.modify_date.str.split(',').apply(lambda x: x[-1] if x else None)
df.modify_date = tmp.apply(date_utils.date_type2datetime)
df2 = df.merge(latest, on='ts_code', how='left')
df3 = df2.loc[(df2.actual_date > df2.DateTime) | (df2.modify_date > df2.DateTime), :]
if not df3.empty:
update_tickers = update_tickers.union(set(df3.ts_code.tolist()))
with tqdm(update_tickers) as pbar:
for ticker in update_tickers:
pbar.set_description(f'更新 {ticker} 的财报')
self.get_financial_index(ticker)
self.get_financial(ticker)
pbar.update()
def get_financial_index(self, ticker: str) -> Optional[pd.DataFrame]:
""" 获取财务指标
:param ticker: 证券代码(000001.SZ)
:return: 财务指标
"""
data_category = '财务指标'
column_desc = self._factor_param[data_category]['输出参数']
df = self._pro.fina_indicator(ts_code=ticker, fields=list(column_desc.keys()))
if df.empty:
return
df = df.dropna(subset=['ann_date', 'end_date'])
df = df.sort_values('update_flag').groupby(['ann_date', 'end_date']).tail(1)
df = df.drop('update_flag', axis=1).fillna(np.nan).replace(0, np.nan).dropna(how='all', axis=1)
df = self._standardize_df(df, column_desc).sort_index()
df = df.loc[df.index.get_level_values('报告期').month % 3 == 0, :]
df = self.append_report_date_cache(df)
self.db_interface.delete_id_records(data_category, ticker)
self.db_interface.insert_df(df, data_category)
return df
def get_hs_constitute(self) -> None:
""" 沪深股通成分股进出记录. 月末更新. """
data_category = '沪深股通成份股'
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
storage = []
for hs_type, is_new in product(['SH', 'SZ'], ['0', '1']):
storage.append(self._pro.hs_const(hs_type=hs_type, is_new=is_new))
df = pd.concat(storage)
in_part = df.loc[:, ['in_date', 'ts_code']]
in_part[data_category] = True
out_part = df.loc[:, ['out_date', 'ts_code']].dropna()
out_part[data_category] = False
out_part.rename({'out_date': 'in_date'}, axis=1, inplace=True)
stacked_df = pd.concat([in_part, out_part])
stacked_df = self._standardize_df(stacked_df, {'in_date': 'DateTime', 'ts_code': 'ID'})
self.db_interface.update_df(stacked_df, data_category)
logging.getLogger(__name__).info(f'{data_category}数据下载完成')
@date_utils.strlize_input_dates
def get_hs_holding(self, date: date_utils.DateType):
data_category = '沪深港股通持股明细'
desc = self._factor_param[data_category]['输出参数']
fields = list(desc.keys())
df = self._pro.hk_hold(trade_date=date, fields=fields)
df = self._standardize_df(df, desc)
self.db_interface.update_df(df, data_category)
def update_hs_holding(self) -> None:
""" 沪深港股通持股明细 """
data_category = '沪深港股通持股明细'
start_date = self.db_interface.get_latest_timestamp(data_category, START_DATE['ggt'])
dates = self.calendar.select_dates(start_date, dt.date.today())
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
with tqdm(dates) as pbar:
for date in dates:
pbar.set_description(f'下载{date}的沪深港股通持股明细')
self.get_hs_holding(date)
pbar.update()
logging.getLogger(__name__).info(f'{data_category}下载完成.')
#######################################
# HK stock funcs
#######################################
def update_hk_stock_daily(self):
table_name = '港股日行情'
hk_cal = date_utils.HKTradingCalendar(self.db_interface)
start_date = self.db_interface.get_latest_timestamp(table_name, START_DATE['hk_daily'])
end_date = hk_cal.yesterday()
dates = hk_cal.select_dates(start_date=start_date, end_date=end_date, inclusive=(False, True))
rate = self._factor_param[table_name]['每分钟限速']
rate_limiter = RateLimiter(rate, 60)
with tqdm(dates) as pbar:
for date in dates:
with rate_limiter:
pbar.set_description(f'下载{date}的{table_name}')
self.get_hk_stock_daily(date)
pbar.update()
@date_utils.strlize_input_dates
def get_hk_stock_daily(self, date: date_utils.DateType) -> pd.DataFrame:
table_name = '港股日行情'
desc = self._factor_param[table_name]['输出参数']
df = self._pro.hk_daily(trade_date=date, fields=list(desc.keys()))
price_df = self._standardize_df(df, desc)
self.db_interface.insert_df(price_df, table_name)
return price_df
#######################################
# index funcs
#######################################
@date_utils.strlize_input_dates
def get_index_daily(self, date: date_utils.DateType) -> None:
"""
Fetch index quote data: open/high/low/close prices, volume/turnover, P/E, P/B and market cap.
Default indexes are the SSE Composite, SZSE Component, SME Board, ChiNext, SSE 50, CSI 300 and CSI 500.
Note: the CSI 300 record does not carry valuation metrics such as P/E.
:param date: date
:return: index quote data
"""
table_name = '指数日行情'
desc = self._factor_param[table_name]['输出参数']
price_fields = list(desc.keys())
basic_desc = self._factor_param['大盘指数每日指标']['输出参数']
basic_fields = list(basic_desc.keys())
storage = []
indexes = list(constants.STOCK_INDEXES.values())
for index in indexes:
storage.append(self._pro.index_daily(ts_code=index, start_date=date, end_date=date, fields=price_fields))
price_info = pd.concat(storage)
price_info['vol'] = price_info['vol'] * 100
price_info['amount'] = price_info['amount'] * 1000
price_info = self._standardize_df(price_info, desc)
valuation_info = self._pro.index_dailybasic(trade_date=date, fields=basic_fields)
valuation_info = self._standardize_df(valuation_info, basic_desc)
data = pd.concat([price_info, valuation_info], axis=1)
data = data.loc[data.index.get_level_values('ID').isin(indexes), :]
self.db_interface.insert_df(data, table_name)
def update_index_daily(self):
table_name = '指数日行情'
start_date = self.db_interface.get_latest_timestamp(table_name, START_DATE['index_daily'])
dates = self.calendar.select_dates(start_date, dt.date.today(), inclusive=(False, True))
rate = self._factor_param[table_name]['每分钟限速']
rate_limiter = RateLimiter(rate, period=60)
logging.getLogger(__name__).debug(f'开始下载{table_name}.')
with tqdm(dates) as pbar:
for date in dates:
with rate_limiter:
pbar.set_description(f'下载{date}的{table_name}')
self.get_index_daily(date)
pbar.update()
logging.getLogger(__name__).info(f'{table_name}下载完成')
@date_utils.dtlize_input_dates
def get_index_weight(self, indexes: Sequence[str] = None,
start_date: date_utils.DateType = None, end_date: date_utils.DateType = None) -> None:
""" 指数成分和权重
默认指数为 ['000016.SH', '399300.SH', '000905.SH'], 即50, 300, 500
:param indexes: 指数代码
:param start_date: 开始时间
:param end_date: 结束时间
:return: None
"""
data_category = '指数成分和权重'
column_desc = self._factor_param[data_category]['输出参数']
indexes = constants.BOARD_INDEXES if indexes is None else indexes
if end_date is None:
end_date = dt.datetime.today()
dates = self.calendar.last_day_of_month(start_date, end_date)
dates = sorted(list(set([start_date] + dates + [end_date])))
logging.getLogger(__name__).debug(f'开始下载{data_category}.')
with tqdm(dates) as pbar:
for i in range(len(dates) - 1):
storage = []
curr_date_str = date_utils.date_type2str(dates[i])
next_date_str = date_utils.date_type2str(dates[i + 1])
for index in indexes:
pbar.set_description(f'下载{curr_date_str} 到 {next_date_str} 的 {index} 的 成分股权重')
storage.append(self._pro.index_weight(index_code=index,
start_date=curr_date_str, end_date=next_date_str))
df = self._standardize_df(pd.concat(storage), column_desc)
self.db_interface.update_df(df, '指数成分股权重')
pbar.update()
logging.getLogger(__name__).info(f'{data_category}下载完成.')
#######################################
# future funcs
#######################################
# TODO
@date_utils.strlize_input_dates
def _get_future_settle_info(self, date):
table_name = '期货结算参数'
desc = self._factor_param[table_name]['输出参数']
storage = []
for exchange in constants.FUTURE_EXCHANGES:
storage.append(self._pro.fut_settle(trade_date=date, exchange=exchange, fields=list(desc.keys())))
data = pd.concat(storage, ignore_index=True)
df = self._standardize_df(data, desc)
return df
#######################################
# funds funcs
#######################################
def update_fund_daily(self):
"""更新基金日行情"""
daily_table_name = '场内基金日行情'
nav_table_name = '公募基金净值'
asset_table_name = '基金规模数据'
daily_params = self._factor_param[daily_table_name]['输出参数']
nav_params = self._factor_param[nav_table_name]['输出参数']
share_params = self._factor_param[asset_table_name]['输出参数']
start_date = self.db_interface.get_latest_timestamp(daily_table_name, START_DATE['fund_daily'])
start_date = self.calendar.offset(start_date, -4)
end_date = dt.date.today()
dates = self.calendar.select_dates(start_date, end_date, (False, True))
rate = self._factor_param[nav_table_name]['每分钟限速']
rate_limiter = RateLimiter(rate, period=60)
with tqdm(dates) as pbar:
for date in dates:
with rate_limiter:
pbar.set_description(f'下载{date}的{daily_table_name}')
date_str = date_utils.date_type2str(date)
daily_data = self._pro.fund_daily(trade_date=date_str, fields=list(daily_params.keys()))
daily_data['vol'] = daily_data['vol'] * 100
daily_data['amount'] = daily_data['amount'] * 1000
daily_data = self._standardize_df(daily_data, daily_params)
ex_nav_data = self._pro.fund_nav(end_date=date_str, market='E')
ex_nav_part = ex_nav_data.loc[:, ['ts_code', 'end_date', 'unit_nav']]
ex_nav_part = self._standardize_df(ex_nav_part, nav_params)
share_data = self._pro.fund_share(trade_date=date_str)
share_data['fd_share'] = share_data['fd_share'] * 10000
ind = share_data['market'] == 'O'
share_data.drop(['fund_type', 'market'], axis=1, inplace=True)
share_data = self._standardize_df(share_data, share_params)
ex_share_data = share_data.loc[~ind, :]
of_share_data = share_data.loc[ind, :]
db_data = daily_data.join(ex_nav_part, how='left').join(ex_share_data, how='left')
nav_data = self._pro.fund_nav(end_date=date_str, market='O', fields=list(nav_params.keys()))
nav_data = nav_data.loc[~(pd.isna(nav_data['accum_nav']) & (nav_data['unit_nav'] == 1)), :].drop('accum_nav', axis=1)
nav_part = self._standardize_df(nav_data.iloc[:, :3].copy(), nav_params)
asset_part = self._standardize_df(nav_data.iloc[:, [0, 1, 3, 4]].dropna(), nav_params)
self.db_interface.update_df(db_data, daily_table_name)
self.db_interface.update_df(nav_part, '场外基金净值')
self.db_interface.update_df(of_share_data, '场外基金份额')
self.db_interface.update_df(asset_part, '场外基金规模')
pbar.update()
logging.getLogger(__name__).info(f'{daily_table_name} 更新完成.')
def update_fund_asset(self, tickers: Sequence[str] = None):
if tickers is None:
tickers = FundTickers(self.db_interface).all_ticker()
tickers = [it for it in tickers if len(it) == 9]
asset_table_name = '场外基金份额'
share_params = self._factor_param[asset_table_name]['输出参数']
rate = self._factor_param[asset_table_name]['每分钟限速'] - 1
rate_limiter = RateLimiter(rate, period=60)
storage = []
with tqdm(tickers) as pbar:
for ticker in tickers:
with rate_limiter:
pbar.set_description(f'更新 {ticker} 的份额')
storage.append(self._pro.fund_share(ts_code=ticker))
pbar.update()
share_data = pd.concat(storage)
share_data['fd_share'] = share_data['fd_share'] * 10000
ind = share_data['market'] == 'O'
share_data.drop(['fund_type', 'market'], axis=1, inplace=True)
share_data = self._standardize_df(share_data, share_params)
ex_share_data = share_data.loc[~ind, :]
ex_to_of_share_data = self.generate_of_data_from_exchange_data(ex_share_data)
of_share_data = pd.concat([share_data.loc[ind, :], ex_to_of_share_data])
self.db_interface.update_df(ex_share_data, '场内基金日行情')
self.db_interface.update_df(of_share_data, '场外基金份额')
def update_fund_dividend(self):
"""更新基金分红信息"""
table_name = '公募基金分红'
params = self._factor_param[table_name]['输出参数']
rate = self._factor_param[table_name]['每分钟限速']
start_date = self.db_interface.get_latest_timestamp(table_name, dt.date(1999, 3, 31))
end_date = dt.date.today()
dates = self.calendar.select_dates(start_date, end_date, (False, True))
with tqdm(dates) as pbar:
rate_limiter = RateLimiter(rate - 1, period=60)
for date in dates:
with rate_limiter:
pbar.set_description(f'下载{date}的{table_name}')
df = self._pro.fund_div(ex_date=date_utils.date_type2str(date), fields=list(params.keys()))
df = df.dropna().drop_duplicates()
shsz_part = df.loc[~df['ts_code'].str.endswith('.OF'), ['ts_code', 'ex_date', 'div_cash']].copy()
shsz_part = self._standardize_df(shsz_part, params)
of_part = df.loc[:, ['ts_code', 'net_ex_date', 'div_cash']].copy()
of_part['ts_code'] = of_part['ts_code'].str.replace('SZ|SH', 'OF', regex=True)
of_part = self._standardize_df(of_part, params)
data = pd.concat([of_part, shsz_part])
self.db_interface.update_df(data, table_name)
pbar.update()
logging.getLogger(__name__).info(f'{table_name} 更新完成.')
def get_update_fund_portfolio_info(self):
tickers = StockFundTickers(self.db_interface).ticker()
fund_manager_info = self.db_interface.read_table('基金列表', ids=tickers)
managers = fund_manager_info['管理人'].drop_duplicates().tolist()
update_managers = []
table_name = '公募基金持仓'
rate = self._factor_param[table_name]['每分钟限速']
rate_limiter = RateLimiter(rate - 1, period=60)
report_period = None
for manager in managers:
for investment_type in ['灵活配置型基金', '偏股混合型基金']:
mask = (fund_manager_info['管理人'] == manager) & (fund_manager_info['投资类型'] == investment_type)
tickers = fund_manager_info.loc[mask, :].index.tolist()
checker = False
for ticker in tickers:
holding = self.db_interface.read_table('公募基金持仓', ids=ticker)
if not holding.empty:
db_reporting_period = holding.index.get_level_values('报告期')[0]
if db_reporting_period >= date_utils.ReportingDate.get_latest_report_date()[-1]:
break
with rate_limiter:
df = self._pro.fund_portfolio(ts_code=ticker)
tmp = date_utils.date_type2datetime(df['end_date'][0])
if tmp >= db_reporting_period:
report_period = tmp
update_managers.append(manager)
checker = True
break
if checker:
break
tickers = FundWithStocksTickers(self.db_interface).ticker()
storage = []
for manager in update_managers:
man_ticker = set(tickers) & set(
fund_manager_info.loc[fund_manager_info['管理人'] == manager, :].index.tolist())
storage.extend(list(man_ticker))
self.update_fund_portfolio(storage[25:], report_period)
def update_fund_portfolio(self, tickers: Sequence[str] = None, end_date: dt.datetime = None):
"""更新公募基金持仓数据
PS: 每个基金只保存最近的4000条记录
"""
table_name = '公募基金持仓'
params = self._factor_param[table_name]['输出参数']
rate = self._factor_param[table_name]['每分钟限速']
if tickers is None:
tickers = FundTickers(self.db_interface).ticker()
with tqdm(tickers) as pbar:
rate_limiter = RateLimiter(rate - 1, period=60)
for ticker in tickers:
with rate_limiter:
pbar.set_description(f'下载{ticker}的{table_name}')
df = self._pro.fund_portfolio(ts_code=ticker, fields=list(params.keys()))
df = df.drop_duplicates(subset=list(params.keys())[:4])
df = self._standardize_df(df, params)
if end_date:
df = df.loc[df.index.get_level_values('报告期') == end_date, :]
try:
self.db_interface.insert_df(df, table_name)
except:
pass
pbar.update()
logging.getLogger(__name__).info(f'{table_name} 更新完成.')
#######################################
# us stock funcs
#######################################
def get_us_stock(self, date: date_utils.DateType):
"""获取美股日行情"""
table_name = '美股日行情'
desc = self._factor_param[table_name]['输出参数']
current_date_str = date_utils.date_type2str(date)
df = self._pro.us_daily(trade_date=current_date_str, fields=list(desc.keys()))
price_df = self._standardize_df(df, desc)
self.db_interface.update_df(price_df, table_name)
#######################################
# utils funcs
#######################################
@staticmethod
def _standardize_df(df: pd.DataFrame, parameter_info: Mapping[str, str]) -> Union[pd.Series, pd.DataFrame]:
dates_columns = [it for it in df.columns if it.endswith('date')]
for it in dates_columns:
df[it] = df[it].apply(date_utils.date_type2datetime)
df.rename(parameter_info, axis=1, inplace=True)
index = [it for it in ['DateTime', 'ID', 'ConstituteTicker', '报告期'] if it in df.columns]
df = df.set_index(index, drop=True)
if df.shape[1] == 1:
df = df.iloc[:, 0]
return df
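# Sketch of what _standardize_df does (hypothetical input, for illustration only):
#
#   df = pd.DataFrame({'ts_code': ['000001.SZ'], 'trade_date': ['20210104'], 'close': [18.6]})
#   out = TushareData._standardize_df(df, {'ts_code': 'ID', 'trade_date': 'DateTime', 'close': '收盘价'})
#   # -> columns ending in "date" are parsed to datetime, columns are renamed via the mapping,
#   #    a (DateTime, ID) MultiIndex is set, and because only one data column remains the result
#   #    is squeezed to a Series.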
@staticmethod
def _format_list_date(df: pd.DataFrame, extend_delist_date: bool = False) -> pd.Series:
df.columns = ['ID', 'list_date', 'delist_date', '证券类型']
listed = df.loc[:, ['ID', '证券类型', 'list_date']]
listed['上市状态'] = True
unlisted = df.loc[:, ['ID', '证券类型', 'delist_date']].dropna().rename({'delist_date': 'list_date'}, axis=1)
unlisted['上市状态'] = False
if extend_delist_date:
listed['list_date'] = date_utils.date_type2datetime(listed['list_date'].tolist())
unlisted['list_date'] = \
[d + dt.timedelta(days=1) for d in date_utils.date_type2datetime(unlisted['list_date'].tolist())]
output = pd.concat([listed, unlisted], ignore_index=True).dropna()
output = TushareData._standardize_df(output, {'ts_code': 'ID', 'list_date': 'DateTime'})
output = output.loc[output.index.get_level_values('DateTime') <= dt.datetime.now(), :]
return output
@staticmethod
def format_ticker(tickers: Union[Sequence[str], str]) -> Union[Sequence[str], str]:
if isinstance(tickers, str):
return TushareData._format_ticker(tickers)
else:
return [TushareData._format_ticker(it) for it in tickers]
@staticmethod
def _format_ticker(ticker: str) -> str:
ticker = ticker.replace('.CFX', '.CFE').replace('.ZCE', '.CZC')
if ticker.endswith('.CZC') and len(ticker) <= 10:
ticker = utils.format_czc_ticker(ticker)
return ticker
@classmethod
def from_config(cls, config_loc: str):
with open(config_loc, 'r', encoding='utf-8') as f:
mod_config = json.load(f)
db_interface = config.generate_db_interface_from_config(config_loc)
return cls(db_interface=db_interface, tushare_token=mod_config['tushare']['token'])
@staticmethod
def append_report_date_cache(data: pd.DataFrame) -> pd.DataFrame:
dates = data.index.get_level_values('DateTime').unique()
storage = []
def deal_data(report_date, offset_str: str):
pre_report_date = date_utils.ReportingDate.offset(report_date, offset_str)
pre_record = info.loc[info.index.get_level_values('报告期') == pre_report_date].tail(1)
pre_date = None if pre_record.empty else pre_record.index.get_level_values('DateTime')[0]
return pre_date
for date in dates:
info = data.loc[data.index.get_level_values('DateTime') <= date, :]
newest_entry = info.tail(1)
report_date = pd.to_datetime(newest_entry.index.get_level_values('报告期')[0])
# quarterly
q1 = deal_data(report_date, 'q1')
q2 = deal_data(report_date, 'q2')
q4 = deal_data(report_date, 'q4')
q5 = deal_data(report_date, 'q5')
# yearly
y1 = deal_data(report_date, 'y1')
y2 = deal_data(report_date, 'y2')
y3 = deal_data(report_date, 'y3')
y5 = deal_data(report_date, 'y5')
res = pd.DataFrame([q1, q2, q4, q5, y1, y2, y3, y5]).T.set_index(newest_entry.index)
res.columns = ['q1', 'q2', 'q4', 'q5', 'y1', 'y2', 'y3', 'y5']
storage.append(res)
cache = pd.concat(storage)
return pd.concat([data, cache], axis=1)
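# Reading aid for append_report_date_cache (descriptive comment, not original documentation):
# for the newest announcement at each DateTime it looks up the announcement dates of other
# report periods (offsets 'q1', 'q2', 'q4', 'q5', 'y1', 'y2', 'y3', 'y5' as produced by
# date_utils.ReportingDate.offset) and appends them as extra columns with those names.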
import time
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import KFold
from tqdm import tqdm
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if 'log_time' in kw:
name = kw.get('log_name', method.__name__.upper())
kw['log_time'][name] = int((te - ts) * 1000)
else:
print('%r %2.2f ms' % (method.__name__, (te - ts) * 1000))
return result
return timed
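# Usage sketch for the timing decorator (illustrative only):
#
#   @timeit
#   def slow_add(a, b, **kwargs):      # **kwargs so that log_time/log_name can be passed through
#       time.sleep(0.1)
#       return a + b
#
#   slow_add(1, 2)                     # prints "'slow_add' 100.xx ms"
#   log = {}
#   slow_add(1, 2, log_time=log)       # stores the elapsed milliseconds under log['SLOW_ADD'] instead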
@timeit
def fea_date_time(df):
date_org = df['date']
df['date'] = df['date'].astype(str)
df["date"] = df["date"].apply(lambda x: x[:4] + "-" + x[4:6] + "-" + x[6:])
df["date"] = pd.to_datetime(df["date"])
# df["year"] = df['date'].dt.year
df["month"] = df['date'].dt.month
df["day"] = df['date'].dt.day
df['hour'] = pd.to_datetime(df['visitStartTime'], unit='s').dt.hour # aiden
# df["weekofmonth"] = df['day'].astype(int) // 7 # aiden
df["weekday"] = df['date'].dt.weekday
# df['weekofyear'] = df['date'].dt.weekofyear
df['month_unique_user_count'] = df.groupby('month')['fullVisitorId'].transform('nunique')
# df['month_unique_s_count'] = df.groupby('month')['sessionId'].transform('nunique')
# df['day_unique_user_count'] = df.groupby('day')['fullVisitorId'].transform('nunique')
# df['day_unique_s_count'] = df.groupby('day')['sessionId'].transform('nunique')
df['weekday_unique_user_count'] = df.groupby('weekday')['fullVisitorId'].transform('nunique')
# df['weekday_unique_s_count'] = df.groupby('weekday')['sessionId'].transform('nunique')
df['hour_unique_user_count'] = df.groupby('hour')['fullVisitorId'].transform('nunique') # aiden
# df['hour_unique_s_count'] = df.groupby('hour')['sessionId'].transform('nunique') # aiden
df['user_hour_mean'] = df.groupby(['fullVisitorId'])['hour'].transform('mean') # aiden
df['user_hour_max'] = df.groupby(['fullVisitorId'])['hour'].transform('max') # aiden
df['user_hour_min'] = df.groupby(['fullVisitorId'])['hour'].transform('min') # aiden
# df['user_hour_var'] = df.groupby(['fullVisitorId'])['hour'].transform('var') # aiden
# df['user_hour_max-min'] = df['user_hour_max'] - df['user_hour_min'] # aiden
# df['user_weekday_hour_mean'] = df.groupby(['fullVisitorId', 'weekday'])['hour'].transform('mean') # aiden
df['date'] = date_org
return df
@timeit
def fea_format(df):
for col in ['visitNumber', 'totals_hits', 'totals_pageviews']:
df[col] = df[col].astype(float)
df['trafficSource_adwordsClickInfo.isVideoAd'].fillna(True, inplace=True)
df['trafficSource_isTrueDirect'].fillna(False, inplace=True)
return df
@timeit
def fea_device(df):
df['browser_category'] = df['device_browser'] + '_' + df['device_deviceCategory']
df['browser_operatingSystem'] = df['device_browser'] + '_' + df['device_operatingSystem']
df['mean_hour_per_browser_operatingSystem'] = df.groupby('browser_operatingSystem')['hour'].transform(
'mean') # aiden
df['source_country'] = df['trafficSource_source'] + '_' + df['geoNetwork_country']
return df
@timeit
def fea_totals(df):
df['visitNumber'] = np.log1p(df['visitNumber'])
df['totals_hits'] = np.log1p(df['totals_hits'])
df['totals_pageviews'] = np.log1p(df['totals_pageviews'].fillna(0))
# df['totals_pageviews_hit_rate'] = df['totals_hits'] - df['totals_pageviews']
# df['mean_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('mean')
df['sum_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('sum')
df['max_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('max')
# df['min_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('min')
df['var_hits_per_day'] = df.groupby(['day'])['totals_hits'].transform('var')
df['mean_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('mean') # aiden
df['sum_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('sum') # aiden
df['max_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('max') # aiden
# df['min_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('min') # aiden
# df['var_hits_per_hour'] = df.groupby(['hour'])['totals_hits'].transform('var') # aiden
return df
@timeit
def fea_geo_network(df):
# df['sum_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform('sum')
# df['count_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform(
# 'count')
df['mean_pageviews_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform(
'mean')
df['sum_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('sum')
# df['count_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('count')
# df['mean_hits_per_network_domain'] = df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('mean')
return df
@timeit
def fea_traffic_source(df):
df['campaign_medium'] = df['trafficSource_campaign'] + '_' + df['trafficSource_medium']
df['medium_hits_mean'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('mean')
df['medium_hits_max'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('max')
df['medium_hits_min'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('min')
df['medium_hits_sum'] = df.groupby(['trafficSource_medium'])['totals_hits'].transform('sum')
return df
@timeit
def fea_shift(df): # aiden
df_g_sorted = df.sort_values(['visitStartTime'], ascending=True).groupby(['fullVisitorId'])
df['visitStartTime_b1'] = df_g_sorted['visitStartTime'].shift(1)
df.loc[df['visitStartTime_b1'].isnull(), 'visitStartTime_b1'] = df['visitStartTime']
df['visitStartTime_b2'] = df_g_sorted['visitStartTime'].shift(2)
df.loc[df['visitStartTime_b2'].isnull(), 'visitStartTime_b2'] = df['visitStartTime_b1']
df['visitStartTime_b1_diff'] = np.log1p(df['visitStartTime'] - df['visitStartTime_b1'])
df['visitStartTime_b2_diff'] = np.log1p(df['visitStartTime_b1'] - df['visitStartTime_b2'])
df.drop(['visitStartTime_b1'], axis=1, inplace=True)
df.drop(['visitStartTime_b2'], axis=1, inplace=True)
df['totals_hits_b1'] = df_g_sorted['totals_hits'].shift(1).fillna(0)
df['totals_pageviews_b1'] = df_g_sorted['totals_pageviews'].shift(1).fillna(0)
return df
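# The shift features above capture, per visitor, the log1p-scaled time since the previous and the
# second-to-last visit plus the hits/pageviews of the previous session; missing lags are backfilled
# so that first visits end up with a time difference of 0.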
def get_features(df):
org_cols = df.columns
df = fea_date_time(df)
df = fea_format(df)
df = fea_device(df)
df = fea_totals(df)
df = fea_geo_network(df)
df = fea_traffic_source(df)
df = fea_shift(df)
fea_cols = list(set(df.columns) - set(org_cols))
# print(new_cols)
return df, fea_cols
@timeit
def encode_label(df_train, df_test, categorical_feature):
print(categorical_feature)
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
train_size = df_train.shape[0]
for c in tqdm(categorical_feature):
# st = time.time()
labels, _ = pd.factorize(df_merge[c].values.astype('str'))
df_train[c] = labels[:train_size]
df_test[c] = labels[train_size:]
# print(c, time.time() - st)
return df_train, df_test
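# Minimal illustration of the factorize-based label encoding (hypothetical data):
#
#   df_tr = pd.DataFrame({'device_browser': ['Chrome', 'Safari', 'Chrome']})
#   df_te = pd.DataFrame({'device_browser': ['Safari', 'Edge']})
#   df_tr, df_te = encode_label(df_tr, df_te, ['device_browser'])
#   # df_tr['device_browser'] -> [0, 1, 0] and df_te['device_browser'] -> [1, 2];
#   # codes are shared because train and test are factorized together.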
@timeit
def encode_frequency(df_train, df_test, categorical_feature):
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
for col in tqdm(categorical_feature):
freq_col = '{}_Frequency'.format(col)
df_freq = df_merge.groupby([col]).size() / df_merge.shape[0]
df_freq = df_freq.reset_index().rename(columns={0: freq_col})
if freq_col in df_train.columns:
df_train.drop(freq_col, axis=1, inplace=True)
if freq_col in df_test.columns:
df_test.drop(freq_col, axis=1, inplace=True)
df_train = df_train.merge(df_freq, on=col, how='left')
df_test = df_test.merge(df_freq, on=col, how='left')
print(df_train.shape, df_test.shape)
return df_train, df_test
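# Frequency-encoding sketch (hypothetical data): each category is mapped to its share of rows
# in the combined train+test frame.
#
#   df_tr = pd.DataFrame({'device_browser': ['Chrome', 'Chrome', 'Safari']})
#   df_te = pd.DataFrame({'device_browser': ['Chrome']})
#   df_tr, df_te = encode_frequency(df_tr, df_te, ['device_browser'])
#   # 'Chrome' appears in 3 of 4 combined rows -> device_browser_Frequency == 0.75,
#   # 'Safari' in 1 of 4 rows -> 0.25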
@timeit
def encode_mean_k_fold(df_train, df_test, categorical_feature, target_col):
def _encode(col, alpha):
target_mean_global = df_train[target_col].mean()
nrows_cat = df_train.groupby(col)[target_col].count()
target_means_cats = df_train.groupby(col)[target_col].mean()
target_means_cats_adj = (target_means_cats * nrows_cat +
target_mean_global * alpha) / (nrows_cat + alpha)
# Mapping means to test data
encoded_col_test = df_test[col].map(target_means_cats_adj)
kfold = KFold(n_splits=5, shuffle=True, random_state=1989)
parts = []
for trn_inx, val_idx in kfold.split(df_train):
df_for_estimation, df_estimated = df_train.iloc[trn_inx], df_train.iloc[val_idx]
nrows_cat = df_for_estimation.groupby(col)[target_col].count()
target_means_cats = df_for_estimation.groupby(col)[target_col].mean()
target_means_cats_adj = (target_means_cats * nrows_cat +
target_mean_global * alpha) / (nrows_cat + alpha)
encoded_col_train_part = df_estimated[col].map(target_means_cats_adj)
parts.append(encoded_col_train_part)
encoded_col_train = pd.concat(parts, axis=0)
encoded_col_train.fillna(target_mean_global, inplace=True)
encoded_col_train.sort_index(inplace=True)
return encoded_col_train, encoded_col_test
for col in tqdm(categorical_feature):
temp_encoded_tr, temp_encoded_te = _encode(col, 5)
new_feat_name = 'mean_k_fold_{}'.format(col)
df_train[new_feat_name] = temp_encoded_tr.values
df_test[new_feat_name] = temp_encoded_te.values
print(df_train.shape, df_test.shape)
print(df_train.columns)
return df_train, df_test
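# The out-of-fold target-mean encoding above uses additive smoothing with alpha = 5:
#
#     encoded(cat) = (mean_cat * n_cat + global_mean * alpha) / (n_cat + alpha)
#
# so rare categories are pulled towards the global target mean, and every training row is encoded
# with statistics computed on the other folds only (test rows use the full training set).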
@timeit
def encode_lda(df_train, df_test, categorical_feature, y_categorized, n_components=10, fea_name='lda'):
print('lda_{}_0to{}'.format(fea_name, n_components - 1))
clf = LinearDiscriminantAnalysis(n_components=n_components)
df_merge = pd.concat([df_train[categorical_feature], df_test[categorical_feature]])
clf.fit(df_merge[categorical_feature], y_categorized)
df_train_lda = pd.DataFrame(clf.transform(df_train[categorical_feature]))
df_test_lda = pd.DataFrame(clf.transform(df_test[categorical_feature]))
col_map = {i: 'lda_{}_{}'.format(fea_name, i) for i in range(n_components)}
df_train_lda.rename(columns=col_map, inplace=True)
df_test_lda.rename(columns=col_map, inplace=True)
for c in col_map:
if c in df_train.columns:
df_train.drop(c, axis=1, inplace=True)
if c in df_test.columns:
df_test.drop(c, axis=1, inplace=True)
df_train = pd.concat([df_train, df_train_lda], axis=1)
df_test = pd.concat([df_test, df_test_lda], axis=1)
return df_train, df_test
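# Note on the LDA encoder above: LinearDiscriminantAnalysis yields at most
# min(n_classes - 1, n_features) components, so n_components=10 assumes y_categorized has at
# least 11 distinct classes; depending on the scikit-learn version a larger request is either
# clipped or rejected with a ValueError.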
import numpy as np
import pandas as pd
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
from nltk.tokenize import sent_tokenize
from itertools import groupby
import copy
import re
import sys
import textstat
# Method to create a matrix that contains only zeros and an index column starting at 0
def create_matrix_index_zeros(rows, columns):
arr = np.zeros((rows, columns))
for r in range(0, rows):
arr[r, 0] = r
return arr
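# Example: create_matrix_index_zeros(3, 2) returns
#   array([[0., 0.],
#          [1., 0.],
#          [2., 0.]])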
# Method to get all authors with a given number of texts. Used in chapter 5.1 to get a corpus with 100 texts for 25
# authors
def get_balanced_df_all_authors(par_df, par_num_text):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
for i in range(0, len(author_count)):
if author_count[i] >= par_num_text and not author_count.index[i] == "Gast-Rezensent":
author_list.append(author_count.index[i])
texts = [par_num_text for i in range(0, len(author_count))]
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = df_balanced_text.append(pd.DataFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
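# Usage sketch (hypothetical): get_balanced_df_all_authors(df_reviews, 100) builds a corpus with
# 100 texts for every author that has at least 100 reviews (excluding "Gast-Rezensent"),
# label-encodes the authors and writes the author mapping to author_mapping.txt.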
# Method to get a specific number of authors with a given number of texts. Used later on to get results for different
# combinations of authors and texts
def get_balanced_df_by_texts_authors(par_df, par_num_text, par_num_author):
author_count = par_df["author"].value_counts()
author_list = []
df_balanced_text = pd.DataFrame(columns=['label_encoded', 'author', 'genres', 'release_date', 'text'])
loop_count, loops = 0, par_num_author
while loop_count < loops:
if author_count[loop_count] >= par_num_text and not author_count.index[loop_count] == "Gast-Rezensent":
author_list.append(author_count.index[loop_count])
# Skip the Author "Gast-Rezensent" if its not the last round and increase the loops by 1
elif author_count.index[loop_count] == "Gast-Rezensent":
loops += 1
loop_count += 1
texts = [par_num_text for i in range(0, len(author_list))]
for index, row in par_df.iterrows():
if row['author'] in author_list:
if texts[author_list.index(row['author'])] != 0:
d = {'author': [row['author']], 'genres': [row['genres']],
'release_date': [row['release_date']], 'text': [row['text']]}
df_balanced_text = df_balanced_text.append(pd.DataFrame.from_dict(d), ignore_index=True)
texts[author_list.index(row['author'])] -= 1
if sum(texts) == 0:
break
# Label encoding and delete author column after
dic_author_mapping = author_encoding(df_balanced_text)
df_balanced_text['label_encoded'] = get_encoded_author_vector(df_balanced_text, dic_author_mapping)[:, 0]
df_balanced_text.drop("author", axis=1, inplace=True)
# Print author mapping in file
original_stdout = sys.stdout
with open('author_mapping.txt', 'w') as f:
sys.stdout = f
print(dic_author_mapping)
sys.stdout = original_stdout
for i in range(0, len(author_list)):
print(f"Autor {i+1}: {par_num_text - texts[i]} Texte")
return df_balanced_text
# Feature extraction of the feature described in chapter 5.6.1
def get_bow_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_bow = {}
d_bow_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
for word in tokens:
try:
d_bow["bow:"+word.lemma_.lower()] += 1
except KeyError:
d_bow["bow:"+word.lemma_.lower()] = 1
d_bow_list.append(copy.deepcopy(d_bow))
d_bow.clear()
return pd.DataFrame(d_bow_list)
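# Usage sketch (hypothetical input; requires the spaCy model "de_core_news_sm" to be installed):
#
#   df_demo = pd.DataFrame({'text': ["Der kleine Hund läuft schnell.", "Der Hund schläft."]})
#   bow = get_bow_matrix(df_demo)
#   # -> one row per text with columns such as "bow:hund" holding lemma counts; punctuation,
#   #    digits, stop words and function-word POS tags are filtered out, and lemmas that do not
#   #    occur in a text show up as NaN.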
# Feature extraction of the feature described in chapter 5.6.2
def get_word_n_grams(par_df, n):
nlp = spacy.load("de_core_news_sm")
d_word_ngram = {}
d_word_ngram_list = []
function_pos = ["ADP", "AUX", "CONJ", "CCONJ", "DET", "PART", "PRON", "SCONJ"]
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not
word.is_digit and word.lemma_ not in STOP_WORDS and word.pos_ not in function_pos]
tokens = [token.lemma_.lower() for token in tokens]
for w in range(0, len(tokens)):
if w + n <= len(tokens):
try:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] += 1
except KeyError:
d_word_ngram["w" + str(n) + "g" + ":" + '|'.join(tokens[w:w + n])] = 1
d_word_ngram_list.append(copy.deepcopy(d_word_ngram))
d_word_ngram.clear()
return pd.DataFrame(d_word_ngram_list)
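# Feature keys are built as "w<n>g:" plus the lemmas joined by '|', e.g. a 2-gram column could be
# "w2g:hund|laufen" (illustrative; the exact lemmas depend on the spaCy model). The n-grams are
# counted over the filtered lemma sequence, not over raw sentences.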
# Feature extraction of the feature described in chapter 5.6.3
def get_word_count(par_df):
arr_wordcount = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
only_words = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space:
only_words.append(t)
arr_wordcount[index] = len(only_words)
only_words.clear()
return pd.DataFrame(data=arr_wordcount, columns=["word_count"])
# Feature extraction of the feature described in chapter 5.6.4 with some variations
# Count all word lengths individually
def get_word_length_matrix(par_df):
nlp = spacy.load("de_core_news_sm")
d_word_len = {}
d_word_len_list = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
tokens = [word for word in tokens if not word.is_punct and not word.is_space and not word.is_digit]
for word in tokens:
try:
d_word_len["w_len:"+str(len(word.text))] += 1
except KeyError:
d_word_len["w_len:"+str(len(word.text))] = 1
d_word_len_list.append(copy.deepcopy(d_word_len))
d_word_len.clear()
return pd.DataFrame(d_word_len_list)
# Count word lengths and set 2 intervals
def get_word_length_matrix_with_interval(par_df, border_1, border_2):
arr_wordcount_with_interval = np.zeros((len(par_df), border_1 + 2))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= border_1 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif border_1 < len(
word.text) <= border_2 and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -2] += 1
elif not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, border_1+1)]
word_length_labels.append(f"{border_1+1}-{border_2}")
word_length_labels.append(f">{border_2}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count word lengths and sum all above a defined margin
def get_word_length_matrix_with_margin(par_df, par_margin):
arr_wordcount_with_interval = np.zeros((len(par_df), par_margin + 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for word in tokens:
if len(word.text) <= par_margin and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, len(word.text) - 1] += 1
elif par_margin < len(word.text) and not word.is_punct and not word.is_space and not word.is_digit:
arr_wordcount_with_interval[index, -1] += 1
word_length_labels = [str(i) for i in range(1, par_margin+1)]
word_length_labels.append(f">{par_margin}")
return pd.DataFrame(data=arr_wordcount_with_interval, columns=word_length_labels)
# Count the average word length of the article
def get_average_word_length(par_df):
arr_avg_word_len_vector = np.zeros((len(par_df), 1))
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
symbol_sum = 0
words = 0
tokens = nlp(row['text'])
for word in tokens:
if not word.is_punct and not word.is_space and not word.is_digit:
symbol_sum += len(word.text)
words += 1
arr_avg_word_len_vector[index, 0] = symbol_sum / words
return pd.DataFrame(data=arr_avg_word_len_vector, columns=["avg_word_length"])
# Feature extraction of the feature described in chapter 5.6.5
def get_yules_k(par_df):
d = {}
nlp = spacy.load("de_core_news_sm")
arr_yulesk = np.zeros((len(par_df), 1))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
if not t.is_punct and not t.is_space and not t.is_digit:
w = t.lemma_.lower()
try:
d[w] += 1
except KeyError:
d[w] = 1
s1 = float(len(d))
s2 = sum([len(list(g)) * (freq ** 2) for freq, g in groupby(sorted(d.values()))])
try:
k = 10000 * (s2 - s1) / (s1 * s1)
arr_yulesk[index] = k
except ZeroDivisionError:
pass
d.clear()
return pd.DataFrame(data=arr_yulesk, columns=["yulesk"])
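# Yule's characteristic K as implemented above:
#
#     K = 10000 * (S2 - S1) / S1^2
#
# where S1 is the number of distinct lemmas and S2 = sum over all frequencies f of
# (number of lemmas occurring exactly f times) * f^2. Higher K means more repetition
# (lower lexical richness); texts that trigger the ZeroDivisionError keep the default value 0.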
# Feature extraction of the feature described in chapter 5.6.6
# Get a vector of all special characters
def get_special_char_label_vector(par_df):
nlp = spacy.load("de_core_news_sm")
special_char_label_vector = []
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.is_punct and c.text not in special_char_label_vector:
special_char_label_vector.append(c.text)
return special_char_label_vector
# Get a matrix of all special characters by a given vector of special chars
def get_special_char_matrix(par_df, par_special_char_label_vector):
nlp = spacy.load("de_core_news_sm")
arr_special_char = np.zeros((len(par_df), len(par_special_char_label_vector)))
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for t in tokens:
chars = ' '.join([c for c in t.text])
chars = nlp(chars)
for c in chars:
if c.text in par_special_char_label_vector:
arr_special_char[index, par_special_char_label_vector.index(c.text)] += 1
return arr_special_char
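# Typical two-step usage (illustrative): build the label vector once on the whole corpus and
# reuse it, so that train and test matrices share the same column order:
#
#   special_chars = get_special_char_label_vector(df_corpus)
#   x_special = get_special_char_matrix(df_corpus, special_chars)
#   # x_special[i, j] counts how often special_chars[j] occurs in text i.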
# Feature extraction of the feature described in chapter 5.6.7
# Get the char-affix-n-grams by a defined n
def get_char_affix_n_grams(par_df, n):
d_prefix_list, d_suffix_list, d_space_prefix_list, d_space_suffix_list = [], [], [], []
d_prefix, d_suffix, d_space_prefix, d_space_suffix = {}, {}, {}, {}
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Prefix
if len(tokens[w].text) >= n + 1:
try:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] += 1
except KeyError:
d_prefix["c" + str(n) + "_p: " + tokens[w].text.lower()[0:n]] = 1
# Suffix
if len(tokens[w].text) >= n + 1:
try:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] += 1
except KeyError:
d_suffix["c" + str(n) + "_s: " + tokens[w].text.lower()[-n:]] = 1
d_prefix_list.append(copy.deepcopy(d_prefix))
d_suffix_list.append(copy.deepcopy(d_suffix))
d_prefix.clear()
d_suffix.clear()
for i in range(0, len(row['text'])):
if row['text'][i] == " " and i + n <= len(row['text']) and i - n >= 0:
# Space-prefix
try:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] += 1
except KeyError:
d_space_prefix["c" + str(n) + "_sp: " + row['text'].lower()[i:n + i]] = 1
# Space-suffix
try:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] += 1
except KeyError:
d_space_suffix["c" + str(n) + "_ss: " + row['text'].lower()[i - n + 1:i + 1]] = 1
d_space_prefix_list.append(copy.deepcopy(d_space_prefix))
d_space_suffix_list.append(copy.deepcopy(d_space_suffix))
d_space_prefix.clear()
d_space_suffix.clear()
df_pre = pd.DataFrame(d_prefix_list)
df_su = pd.DataFrame(d_suffix_list)
df_s_pre = pd.DataFrame(d_space_prefix_list)
df_s_su = pd.DataFrame(d_space_suffix_list)
df_affix = pd.concat([df_pre, df_su, df_s_pre, df_s_su], axis=1)
return df_affix
# Get the char-word-n-grams by a defined n
def get_char_word_n_grams(par_df, n):
d_whole_word_list, d_mid_word_list, d_multi_word_list = [], [], []
d_whole_word, d_mid_word, d_multi_word = {}, {}, {}
match_list = []
nlp = spacy.load("de_core_news_sm")
for index, row in par_df.iterrows():
tokens = nlp(row['text'])
for w in range(0, len(tokens)):
# Whole-word
if len(tokens[w].text) == n:
try:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] += 1
except KeyError:
d_whole_word["c" + str(n) + "_ww: " + tokens[w].text.lower()] = 1
# Mid-word
if len(tokens[w].text) >= n + 2:
for i in range(1, len(tokens[w].text) - n):
try:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] += 1
except KeyError:
d_mid_word["c" + str(n) + "_miw: " + tokens[w].text.lower()[i:i + n]] = 1
d_whole_word_list.append(copy.deepcopy(d_whole_word))
d_mid_word_list.append(copy.deepcopy(d_mid_word))
d_whole_word.clear()
d_mid_word.clear()
# Multi-word
# ignore special character
trimmed_text = re.sub(r'[\s]+', ' ', re.sub(r'[^\w ]+', '', row['text']))
match_list.clear()
for i in range(1, n - 1):
regex = r"\w{" + str(i) + r"}\s\w{" + str(n - 1 - i) + r"}"
match_list += re.findall(regex, trimmed_text.lower())
for match in match_list:
try:
d_multi_word["c" + str(n) + "_mw: " + match] += 1
except KeyError:
d_multi_word["c" + str(n) + "_mw: " + match] = 1
d_multi_word_list.append(copy.deepcopy(d_multi_word))
d_multi_word.clear()
df_ww = pd.DataFrame(d_whole_word_list)
df_miw = pd.DataFrame(d_mid_word_list)
df_mw = pd.DataFrame(d_multi_word_list)
df_word = pd.concat([df_ww, df_miw, df_mw], axis=1)
return df_word
# Get the char-punct-n-grams by a defined n
def get_char_punct_n_grams(par_df, n):
d_beg_punct_list, d_mid_punct_list, d_end_punct_list = [], [], []
d_beg_punct, d_mid_punct, d_end_punct = {}, {}, {}
for index, row in par_df.iterrows():
for c in range(0, len(row['text'])):
if row['text'][c] in ["!", "„", "“", "(", ")", "?", "{", "}", "[", "]", "‚", "‘", "-", "_", ".", ",", ";",
"/", "\\", ":"]:
if c <= len(row['text']) - n + 1:
# beg-punct
try:
d_beg_punct["c" + str(n) + "_bp: " + row['text'].lower()[c:c + n]] += 1
except KeyError:
d_beg_punct["c" + str(n) + "_bp: " + row['text'].lower()[c:c + n]] = 1
if c >= n - 1:
# end-punct
try:
d_end_punct["c" + str(n) + "_ep: " + row['text'].lower()[c - n + 1:+1]] += 1
except KeyError:
d_end_punct["c" + str(n) + "_ep: " + row['text'].lower()[c - n + 1:c + 1]] = 1
# Mid-punct
# Run through all combinations of summands around the special char
for i in range(1, n - 1):
if len(row['text']) - i + 1 >= c >= i - 1:
try:
d_mid_punct["c" + str(n) + "_mp: " + row['text'].lower()[c - i:c + n - i]] += 1
except KeyError:
d_mid_punct["c" + str(n) + "_mp: " + row['text'].lower()[c - i:c + n - i]] = 1
d_beg_punct_list.append(copy.deepcopy(d_beg_punct))
d_end_punct_list.append(copy.deepcopy(d_end_punct))
d_mid_punct_list.append(copy.deepcopy(d_mid_punct))
d_beg_punct.clear()
d_end_punct.clear()
d_mid_punct.clear()
df_bp = pd.DataFrame(d_beg_punct_list)
df_mp = pd.DataFrame(d_mid_punct_list)
    df_ep = pd.DataFrame(d_end_punct_list)
    df_punct = pd.concat([df_bp, df_mp, df_ep], axis=1)
    return df_punct
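

# Illustrative sketch of combining the three character n-gram extractors above
# into one feature frame (the helper name _demo_char_n_grams and n=3 are
# assumptions; missing n-grams show up as NaN and are filled with 0 here).
def _demo_char_n_grams(par_df, n=3):
    df_affix = get_char_affix_n_grams(par_df, n)
    df_word = get_char_word_n_grams(par_df, n)
    df_punct = get_char_punct_n_grams(par_df, n)
    return pd.concat([df_affix, df_word, df_punct], axis=1).fillna(0)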
# Neural network for pop assignment
# Load packages
import tensorflow.keras as tf
from kerastuner.tuners import RandomSearch
from kerastuner import HyperModel
import numpy as np
import pandas as pd
import allel
import zarr
import h5py
from sklearn.model_selection import RepeatedStratifiedKFold, train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
import itertools
import shutil
import sys
import os
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sn
class pop_find_class:
# instance attribute
def __init__(self, infile, sample_data, seed=None, train_prop=0.8, save_dir="out"):
self.infile = infile
self.sample_data = sample_data
self.seed=seed
self.train_prop=train_prop
self.save_dir=save_dir
if os.path.exists(self.infile) is False:
raise ValueError("infile does not exist")
if os.path.exists(self.sample_data) is False:
raise ValueError("sample_data does not exist")
self.samp_list, self.dc, self.uk_list, self.dc_uk, self.unknowns = read_data(
infile=self.infile,
sample_data=self.sample_data,
save_allele_counts=False,
)
# Create test set that will be used to assess model performance later
self.X_train_0, self.X_holdout, self.y_train_0, self.y_holdout = train_test_split(
self.dc, self.samp_list, stratify=self.samp_list["pops"], train_size=self.train_prop
)
# Create save_dir if doesn't already exist
print(f"Output will be saved to: {save_dir}")
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# Save train and test set to save_dir
np.save(save_dir + "/X_train.npy", self.X_train_0)
self.y_train_0.to_csv(save_dir + "/y_train.csv", index=False)
np.save(save_dir + "/X_holdout.npy", self.X_holdout)
self.y_holdout.to_csv(save_dir + "/y_holdout.csv", index=False)
def hyper_tune(self, y_train_0=None, dc=None,max_trials=10,runs_per_trial=10,max_epochs=100,train_prop=0.8,seed=None,save_dir="out",mod_name="hyper_tune"):
y_train_0 = self.y_train_0
dc = self.X_train_0
seed=self.seed
if isinstance(max_trials, np.int) is False:
raise ValueError("max_trials should be integer")
if isinstance(runs_per_trial, np.int) is False:
raise ValueError("runs_per_trial should be integer")
if isinstance(max_epochs, np.int) is False:
raise ValueError("max_epochs should be integer")
if isinstance(train_prop, np.float) is False:
raise ValueError("train_prop should be float")
if isinstance(seed, np.int) is False and seed is not None:
raise ValueError("seed should be integer or None")
if isinstance(save_dir, str) is False:
raise ValueError("save_dir should be string")
if isinstance(mod_name, str) is False:
raise ValueError("mod_name should be string")
# Train prop can't be greater than num samples
if len(dc) * (1 - train_prop) < len(np.unique(y_train_0["pops"])):
raise ValueError("train_prop is too high; not enough samples for test")
# Split data into training test
X_train, X_val, y_train, y_val = train_test_split(
dc,
y_train_0,
stratify=y_train_0["pops"],
train_size=train_prop,
random_state=seed,
)
if len(np.unique(y_train["pops"])) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
hypermodel = classifierHyperModel(
input_shape=X_train.shape[1], num_classes=len(popnames)
)
tuner = RandomSearch(
hypermodel,
objective="val_loss",
seed=seed,
max_trials=max_trials,
executions_per_trial=runs_per_trial,
directory=save_dir,
project_name=mod_name,
)
tuner.search(
X_train - 1,
y_train_enc,
epochs=max_epochs,
validation_data=(X_val - 1, y_val_enc),
)
self.best_mod = tuner.get_best_models(num_models=1)[0]
tuner.get_best_models(num_models=1)[0].save(save_dir + "/best_mod")
def class_train(self,
ensemble=False,
plot_hist=True,
nbags=10,
save_weights=True,
patience=20,
batch_size=32,
max_epochs=100,
):
print(f"Output will be saved to: {self.save_dir}")
y_train = self.y_train_0
dc = self.X_train_0
train_prop = self.train_prop
if len(dc) * (1 - train_prop) < 1:
raise ValueError(
"train_prop is too high; not enough values for test")
seed=self.seed
save_dir = self.save_dir + "/training_output"
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
y_test_samples = self.y_holdout["samples"].to_numpy()
y_test_pops = self.y_holdout["pops"].to_numpy()
# One hot encode test values
enc = OneHotEncoder(handle_unknown="ignore")
y_test_enc = enc.fit_transform(
self.y_holdout["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
self.popnames=popnames
# results storage
TEST_LOSS = []
TEST_ACCURACY = []
TEST_95CI = []
yhats = []
ypreds = []
test_dict = {"count": [], "df": []}
if hasattr(self, 'best_mod'):
model = self.best_mod
else:
# Test if train_prop is too high
if len(dc) * (1 - train_prop) < 1:
raise ValueError(
"train_prop is too high; not enough values for test")
X_train, X_val, y_train, y_val = train_test_split(
dc,
y_train,
stratify=y_train["pops"],
train_size=train_prop,
random_state=seed,
)
# Make sure all classes represented in y_val
if len(np.unique(y_train["pops"]) ) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames1 = enc.categories_[0]
model = basic_model(X_train,popnames1)
self.best_mod = model
if ensemble:
X_train = self.X_train_0
y_train = self.y_train_0
n_prime = np.int(np.ceil(len(X_train) * 0.8))
self.ensembl_fl = []
if os.path.exists(save_dir + "/ensemble_weights"):
shutil.rmtree(save_dir + "/ensemble_weights")
os.makedirs(save_dir + "/ensemble_weights")
for i in range(nbags):
good_bag = False
while good_bag is False:
bag_X = np.zeros(shape=(n_prime, X_train.shape[1]))
bag_y = pd.DataFrame({"samples": [], "pops": [], "order": []})
for j in range(0, n_prime):
ind = np.random.choice(len(X_train))
bag_X[j] = X_train[ind]
bag_y = bag_y.append(y_train.iloc[ind])
dup_pops_df = bag_y.groupby(["pops"]).agg(["count"])
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and (dup_pops_df[("samples", "count")] > 1).all()
):
# Create validation set from training set
bag_X, X_val, bag_y, y_val = train_test_split(
bag_X, bag_y, stratify=bag_y["pops"],
train_size=train_prop
)
if (
pd.Series(popnames).isin(bag_y["pops"]).all()
and pd.Series(popnames).isin(y_val["pops"]).all()
):
good_bag = True
enc = OneHotEncoder(handle_unknown="ignore")
bag_y_enc = enc.fit_transform(
bag_y["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
# Create callbacks
temp_str = "/ensemble_weights/checkpoint_" + str(i)+ ".h5"
self.ensembl_fl.append(temp_str)
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + temp_str,
verbose=1,
save_best_only=True,
save_weights_only=True,
monitor="val_loss",
# monitor="loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
history = model.fit(
bag_X - 1,
bag_y_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + temp_str)
if plot_hist:
plot_history(history=history, i=i, save_dir= save_dir, ensemble=True)
test_loss, test_acc = model.evaluate(self.X_holdout - 1, y_test_enc)
test_df = pd.DataFrame(model.predict(self.X_holdout - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_dict["count"].append(1)
test_dict["df"].append(test_df)
test_df.to_csv(save_dir+"/test_results.csv")
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
yhats = np.array(yhats)
# Get ensemble accuracy
tot_bag_df = test_dict["df"][0].iloc[
:, 0:len(popnames)
].copy()
for i in range(0, len(test_dict["df"])):
tot_bag_df += test_dict["df"][i].iloc[:, 0:len(popnames)]
# Normalize values to be between 0 and 1
tot_bag_df = tot_bag_df / nbags
tot_bag_df["top_samp"] = tot_bag_df.idxmax(axis=1)
tot_bag_df["sampleID"] = test_dict["df"][0]["sampleID"]
tot_bag_df["true_pops"] = test_dict["df"][0]["true_pops"]
ENSEMBLE_TEST_ACCURACY = np.sum(
tot_bag_df["top_samp"] == tot_bag_df["true_pops"]
) / len(tot_bag_df)
tot_bag_df.to_csv(save_dir + "/ensemble_test_results.csv")
else:
# Split training data into training and validation
X_train = self.X_train_0
y_train = self.y_train_0
X_train, X_val, y_train, y_val = train_test_split(
dc, y_train, stratify=y_train["pops"],
random_state=seed)
# Make sure all classes represented in y_val
if len(
np.unique(y_train["pops"])
) != len(np.unique(y_val["pops"])):
raise ValueError(
"Not all pops represented in validation set \
choose smaller value for train_prop."
)
# One hot encoding
enc = OneHotEncoder(handle_unknown="ignore")
y_train_enc = enc.fit_transform(
y_train["pops"].values.reshape(-1, 1)).toarray()
y_val_enc = enc.fit_transform(
y_val["pops"].values.reshape(-1, 1)).toarray()
popnames = enc.categories_[0]
# Create callbacks
if os.path.exists(save_dir + "/default_mod_weights"):
shutil.rmtree(save_dir + "/default_mod_weights")
os.makedirs(save_dir + "/default_mod_weights")
checkpointer = tf.callbacks.ModelCheckpoint(
filepath=save_dir + "/default_mod_weights/checkpoint.h5",
verbose=1,
save_best_only=True,
save_weights_only=True,
monitor="val_loss",
save_freq="epoch",
)
earlystop = tf.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=patience
)
reducelr = tf.callbacks.ReduceLROnPlateau(
monitor="val_loss",
factor=0.2,
patience=int(patience / 3),
verbose=1,
mode="auto",
min_delta=0,
cooldown=0,
min_lr=0,
)
callback_list = [checkpointer, earlystop, reducelr]
# Train model
history = model.fit(
X_train - 1,
y_train_enc,
batch_size=int(batch_size),
epochs=int(max_epochs),
callbacks=callback_list,
validation_data=(X_val - 1, y_val_enc),
verbose=0,
)
# Load best model
model.load_weights(save_dir + "/default_mod_weights/checkpoint.h5")
if plot_hist:
plot_history(history=history, save_dir=save_dir, ensemble=False)
tf.backend.clear_session()
test_loss, test_acc = model.evaluate(self.X_holdout - 1, y_test_enc)
test_df = pd.DataFrame(model.predict(self.X_holdout - 1))
test_df.columns = popnames
test_df["sampleID"] = y_test_samples
test_df["true_pops"] = y_test_pops
test_dict["count"].append(1)
test_dict["df"].append(test_df)
test_df.to_csv(save_dir + "/test_results.csv")
# Find confidence interval of best model
test_err = 1 - test_acc
test_95CI = 1.96 * np.sqrt(
(test_err * (1 - test_err)) / len(y_test_enc))
# Fill test lists with information
TEST_LOSS.append(test_loss)
TEST_ACCURACY.append(test_acc)
TEST_95CI.append(test_95CI)
print(
f"Accuracy of model is {np.round(test_acc, 2)}\
+/- {np.round(test_95CI,2)}"
)
# Print metrics to csv
print("Creating outputs...")
metrics = pd.DataFrame(
{
"metric": [
"Test accuracy",
"Test 95% CI",
"Test loss",
],
"value": [
np.round(TEST_ACCURACY, 2),
np.round(TEST_95CI, 2),
np.round(TEST_LOSS, 2),
],
}
)
metrics.to_csv(save_dir + "/metrics.csv", index=False)
print("Process complete")
def predict(self, ensemble=False):
save_dir = self.save_dir + "/training_output"
uksamples = self.unknowns["sampleID"].to_numpy()
ukgen = self.dc_uk
popnames = self.popnames
pred_dict = {"count": [], "df": []}
top_pops = {"df": [], "pops": []}
ypreds = []
#if hasattr(self, ensembl_fl):
model = self.best_mod
if ensemble:
i=0
pred_dict = {"count": [], "df": []}
top_pops = {"df": [], "pops": []}
for checkpoint in self.ensembl_fl:
model.load_weights(save_dir + checkpoint)
tmp_df = pd.DataFrame(model.predict(ukgen))
tmp_df.columns = popnames
tmp_df["sampleID"] = uksamples
tmp_df["bag"] = i
pred_dict["count"].append(i)
pred_dict["df"].append(tmp_df)
# Find top populations for each sample
top_pops["df"].append(i)
top_pops["pops"].append(
pred_dict["df"][i].iloc[
:, 0:len(popnames)
].idxmax(axis=1)
)
i= i+1
ypreds = np.array(ypreds)
top_pops_df = pd.DataFrame(top_pops["pops"])
top_pops_df.columns = uksamples
top_freqs = {"sample": [], "freq": []}
for samp in uksamples:
top_freqs["sample"].append(samp)
top_freqs["freq"].append(
top_pops_df[samp].value_counts() / len(top_pops_df)
)
# Save frequencies to csv for plotting
            top_freqs_df = pd.DataFrame(top_freqs["freq"])
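
# Hedged usage sketch (illustrative only; the file names below are placeholders,
# not part of the original code). The workflow suggested by the methods above is:
#
#   clf = pop_find_class("genotypes.zarr", "samples.csv", seed=42, save_dir="out")
#   clf.hyper_tune(max_trials=10, runs_per_trial=5)   # optional architecture search
#   clf.class_train(ensemble=True, nbags=10)          # train, evaluate on the holdout
#   clf.predict(ensemble=True)                        # assign the unknown samples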
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 7 20:41:59 2020
@author: prasa
"""
import pandas as pd
import re
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
# Import the file; the label and the message are separated by a tab, so sep='\t' is used
messages = pd.read_csv('D:/Work space/smsspamcollection/SMSSpamCollection', sep='\t', names=["labels", "Text_Message"])
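
# Hedged continuation sketch (not the author's original code): a typical cleaning
# loop for this dataset using the PorterStemmer and stopwords imported above. The
# name 'corpus' is an assumption; nltk's stopwords corpus must have been
# downloaded beforehand via nltk.download('stopwords').
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(len(messages)):
    # keep letters only, lower-case, drop stop words and stem the remainder
    review = re.sub('[^a-zA-Z]', ' ', messages['Text_Message'][i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))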
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.cm as cm
import pandas as pd
import copy
#Filters
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from scipy import signal
from scipy.fft import fft
from scipy.signal import savgol_filter, find_peaks
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from scipy.stats import norm, pearsonr
from sklearn import linear_model
def loadmat(filename):
'''
    This function should be called instead of sio.loadmat directly, as it
    fixes the problem of scipy not properly recovering Python dictionaries
    from .mat files. It calls _check_keys to convert all entries that are
    still mat-objects into nested dictionaries.
'''
def _check_keys(d):
'''
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
'''
for key in d:
if isinstance(d[key], sio.matlab.mio5_params.mat_struct):
d[key] = _todict(d[key])
return d
def _todict(matobj):
'''
A recursive function which constructs from matobjects nested dictionaries
'''
d = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, sio.matlab.mio5_params.mat_struct):
d[strg] = _todict(elem)
elif isinstance(elem, np.ndarray):
d[strg] = _tolist(elem)
else:
d[strg] = elem
return d
def _tolist(ndarray):
'''
A recursive function which constructs lists from cellarrays
(which are loaded as numpy ndarrays), recursing into the elements
if they contain matobjects.
'''
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, sio.matlab.mio5_params.mat_struct):
elem_list.append(_todict(sub_elem))
elif isinstance(sub_elem, np.ndarray):
elem_list.append(_tolist(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
data = sio.loadmat(filename, struct_as_record=False, squeeze_me=True)
return _check_keys(data)
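

# Illustrative usage (the file name 'recording.mat' is a placeholder): nested
# MATLAB structs come back as plain Python dictionaries instead of mat_struct
# objects, so fields can be accessed with ordinary dict indexing.
def _demo_loadmat(path='recording.mat'):
    mat = loadmat(path)
    return list(mat.keys())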
def func_x_y_sets(df_o,df_file_name,q_files,len_signal,channels_selected,
fs,window_training,max_n_records,
q_signals_file,t_size,r_seed,
sbp_threshold,duration_threshold,skew_threshold,
post_beat_max,penalize_max_t,
lowcut,highcut,order,
window,ploy_grade,s_mode,dx_n,
mixed,
signals_path,all_plots=False):
    # Visual inspection showed that the marker tends to lag behind the
    # dicrotic notch point.
shift_dn = -2
df_f = df_o.copy()
df_f = df_f.reset_index(drop=True)
if q_signals_file > 1:
        df_f = pd.concat([df_f] * q_signals_file, ignore_index=False)
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| :class:`pandas.Series` functions and operators implementations in SDC
| Also, it contains Numba internal operators which are required for Series type handling
"""
import numba
import numpy
import operator
import pandas
import math
import sys
from numba.errors import TypingError
from numba.extending import overload, overload_method, overload_attribute
from numba.typing import signature
from numba.extending import intrinsic
from numba import (types, numpy_support, cgutils)
from numba.typed import Dict
from numba import prange
import sdc
import sdc.datatypes.common_functions as common_functions
from sdc.datatypes.common_functions import (TypeChecker, check_index_is_numeric, find_common_dtype_from_numpy_dtypes,
sdc_join_series_indexes)
from sdc.datatypes.hpat_pandas_series_rolling_types import _hpat_pandas_series_rolling_init
from sdc.datatypes.hpat_pandas_stringmethods_types import StringMethodsType
from sdc.datatypes.hpat_pandas_getitem_types import SeriesGetitemAccessorType
from sdc.hiframes.pd_series_type import SeriesType
from sdc.str_arr_ext import (StringArrayType, string_array_type, str_arr_is_na, str_arr_set_na,
num_total_chars, pre_alloc_string_array, cp_str_list_to_array)
from sdc.utils import to_array, sdc_overload, sdc_overload_method, sdc_overload_attribute
from sdc.datatypes import hpat_pandas_series_autogenerated
@sdc_overload(operator.getitem)
def hpat_pandas_series_accessor_getitem(self, idx):
"""
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesGetitemAccessorType):
return None
accessor = self.accessor.literal_value
if accessor == 'iloc':
if isinstance(idx, (types.List, types.Array, types.SliceType)):
def hpat_pandas_series_iloc_list_slice_impl(self, idx):
result_data = self._series._data[idx]
result_index = self._series.index[idx]
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_iloc_list_slice_impl
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iloc_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iloc_impl
def hpat_pandas_series_iloc_callable_impl(self, idx):
index = numpy.asarray(list(map(idx, self._series._data)))
return pandas.Series(self._series._data[index], self._series.index[index], self._series._name)
return hpat_pandas_series_iloc_callable_impl
raise TypingError('{} The index must be an Integer, Slice or List of Integer or a callable.\
Given: {}'.format(_func_name, idx))
if accessor == 'iat':
if isinstance(idx, (int, types.Integer)):
def hpat_pandas_series_iat_impl(self, idx):
return self._series._data[idx]
return hpat_pandas_series_iat_impl
raise TypingError('{} The index must be a Integer. Given: {}'.format(_func_name, idx))
if accessor == 'loc':
# Note: Loc return Series
# Note: Index 0 in slice not supported
# Note: Loc slice and callable with String not implement
index_is_none = (self.series.index is None or
isinstance(self.series.index, numba.types.misc.NoneType))
if isinstance(idx, types.SliceType) and index_is_none:
def hpat_pandas_series_loc_slice_noidx_impl(self, idx):
max_slice = sys.maxsize
start = idx.start
stop = idx.stop
if idx.stop == max_slice:
stop = max_slice - 1
result_data = self._series._data[start:stop+1]
result_index = numpy.arange(start, stop + 1)
return pandas.Series(result_data, result_index, self._series._name)
return hpat_pandas_series_loc_slice_noidx_impl
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_loc_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._series._data[mask], index[mask], self._series._name)
return hpat_pandas_series_loc_impl
        raise TypingError('{} The index must be a Number, Slice, String, List, Array or a callable.\
                          Given: {}'.format(_func_name, idx))
if accessor == 'at':
if isinstance(idx, (int, types.Integer, types.UnicodeType, types.StringLiteral)):
def hpat_pandas_series_at_impl(self, idx):
index = self._series.index
mask = numpy.empty(len(self._series._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return self._series._data[mask]
return hpat_pandas_series_at_impl
raise TypingError('{} The index must be a Number or String. Given: {}'.format(_func_name, idx))
raise TypingError('{} Unknown accessor. Only "loc", "iloc", "at", "iat" are supported.\
Given: {}'.format(_func_name, accessor))
@sdc_overload(operator.getitem)
def hpat_pandas_series_getitem(self, idx):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.get
Limitations
-----------
Supported ``key`` can be one of the following:
- Integer scalar, e.g. :obj:`series[0]`
- A slice, e.g. :obj:`series[2:5]`
- Another series
Examples
--------
.. literalinclude:: ../../../examples/series_getitem.py
:language: python
:lines: 27-
:caption: Getting Pandas Series elements
:name: ex_series_getitem
.. command-output:: python ./series_getitem.py
:cwd: ../../../examples
.. todo:: Fix SDC behavior and add the expected output of the > python ./series_getitem.py to the docstring
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.get` implementation
**Algorithm**: result = series[idx]
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_static_getitem_series1
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
_func_name = 'Operator getitem().'
if not isinstance(self, SeriesType):
return None
# Note: Getitem return Series
index_is_none = isinstance(self.index, numba.types.misc.NoneType)
index_is_none_or_numeric = index_is_none or (self.index and isinstance(self.index.dtype, types.Number))
index_is_string = not index_is_none and isinstance(self.index.dtype, (types.UnicodeType, types.StringLiteral))
if (
isinstance(idx, types.Number) and index_is_none_or_numeric or
(isinstance(idx, (types.UnicodeType, types.StringLiteral)) and index_is_string)
):
def hpat_pandas_series_getitem_index_impl(self, idx):
index = self.index
mask = numpy.empty(len(self._data), numpy.bool_)
for i in numba.prange(len(index)):
mask[i] = index[i] == idx
return pandas.Series(self._data[mask], index[mask], self._name)
return hpat_pandas_series_getitem_index_impl
if (isinstance(idx, types.Integer) and index_is_string):
def hpat_pandas_series_idx_impl(self, idx):
return self._data[idx]
return hpat_pandas_series_idx_impl
if isinstance(idx, types.SliceType):
# Return slice for str values not implement
def hpat_pandas_series_getitem_idx_slice_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_slice_impl
if (
isinstance(idx, (types.List, types.Array)) and
isinstance(idx.dtype, (types.Boolean, bool))
):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
return pandas.Series(self._data[idx], self.index[idx], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (index_is_none and isinstance(idx, SeriesType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
index = numpy.arange(len(self._data))
if (index != idx.index).sum() == 0:
return pandas.Series(self._data[idx._data], index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
def hpat_pandas_series_getitem_idx_list_impl(self, idx):
res = numpy.copy(self._data[:len(idx._data)])
index = numpy.arange(len(self._data))
for i in numba.prange(len(res)):
for j in numba.prange(len(index)):
if j == idx._data[i]:
res[i] = self._data[j]
return pandas.Series(res, index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_list_impl
if (isinstance(idx, SeriesType) and not isinstance(self.index, types.NoneType)):
if isinstance(idx.data.dtype, (types.Boolean, bool)):
# Series with str index not implement
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
if (self._index != idx._index).sum() == 0:
return pandas.Series(self._data[idx._data], self._index[idx._data], self._name)
return hpat_pandas_series_getitem_idx_series_impl
def hpat_pandas_series_getitem_idx_series_impl(self, idx):
index = self.index
data = self._data
size = len(index)
data_res = []
index_res = []
for value in idx._data:
mask = numpy.zeros(shape=size, dtype=numpy.bool_)
for i in numba.prange(size):
mask[i] = index[i] == value
data_res.extend(data[mask])
index_res.extend(index[mask])
return pandas.Series(data=data_res, index=index_res, name=self._name)
return hpat_pandas_series_getitem_idx_series_impl
    raise TypingError('{} The index must be a Number, Slice, String, Boolean Array or a Series.\
                      Given: {}'.format(_func_name, idx))
@sdc_overload(operator.setitem)
def hpat_pandas_series_setitem(self, idx, value):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.__setitem__
Examples
--------
.. literalinclude:: ../../../examples/series_setitem_int.py
:language: python
:lines: 27-
:caption: Setting Pandas Series elements
:name: ex_series_setitem
.. code-block:: console
> python ./series_setitem_int.py
0 0
1 4
2 3
3 2
4 1
dtype: int64
> python ./series_setitem_slice.py
0 5
1 4
2 0
3 0
4 0
dtype: int64
> python ./series_setitem_series.py
0 5
1 0
2 3
3 0
4 1
dtype: int64
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series operator :attr:`pandas.Series.set` implementation
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_setitem*
Parameters
----------
series: :obj:`pandas.Series`
input series
idx: :obj:`int`, :obj:`slice` or :obj:`pandas.Series`
input index
value: :object
input value
Returns
-------
:class:`pandas.Series` or an element of the underneath type
object of :class:`pandas.Series`
"""
ty_checker = TypeChecker('Operator setitem.')
ty_checker.check(self, SeriesType)
if not (isinstance(idx, (types.Integer, types.SliceType, SeriesType))):
ty_checker.raise_exc(idx, 'int, Slice, Series', 'idx')
if not((isinstance(value, SeriesType) and isinstance(value.dtype, self.dtype)) or \
isinstance(value, type(self.dtype))):
ty_checker.raise_exc(value, self.dtype, 'value')
if isinstance(idx, types.Integer) or isinstance(idx, types.SliceType):
def hpat_pandas_series_setitem_idx_integer_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_value
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_slice
"""
self._data[idx] = value
return self
return hpat_pandas_series_setitem_idx_integer_impl
if isinstance(idx, SeriesType):
def hpat_pandas_series_setitem_idx_series_impl(self, idx, value):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_setitem_for_series
"""
super_index = idx._data
self._data[super_index] = value
return self
return hpat_pandas_series_setitem_idx_series_impl
@sdc_overload_attribute(SeriesType, 'iloc')
def hpat_pandas_series_iloc(self):
"""
Pandas Series method :meth:`pandas.Series.iloc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iloc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iloc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iloc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iloc')
return hpat_pandas_series_iloc_impl
@sdc_overload_attribute(SeriesType, 'loc')
def hpat_pandas_series_loc(self):
"""
Pandas Series method :meth:`pandas.Series.loc` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_loc*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute loc().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_loc_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'loc')
return hpat_pandas_series_loc_impl
@sdc_overload_attribute(SeriesType, 'iat')
def hpat_pandas_series_iat(self):
"""
Pandas Series method :meth:`pandas.Series.iat` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_iat*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute iat().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_iat_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'iat')
return hpat_pandas_series_iat_impl
@sdc_overload_attribute(SeriesType, 'at')
def hpat_pandas_series_at(self):
"""
Pandas Series method :meth:`pandas.Series.at` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_at*
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`series`
returns an object of :obj:`series`
"""
_func_name = 'Attribute at().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_at_impl(self):
return sdc.datatypes.hpat_pandas_getitem_types.series_getitem_accessor_init(self, 'at')
return hpat_pandas_series_at_impl
@sdc_overload_method(SeriesType, 'nsmallest')
def hpat_pandas_series_nsmallest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nsmallest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nsmallest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nsmallest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nsmallest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nsmallest(). Unsupported parameter. Given 'keep' != 'first'")
# mergesort is used for stable sorting of repeated values
indices = self._data.argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nsmallest_impl
@sdc_overload_method(SeriesType, 'nlargest')
def hpat_pandas_series_nlargest(self, n=5, keep='first'):
"""
Pandas Series method :meth:`pandas.Series.nlargest` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_nlargest*
Parameters
----------
self: :obj:`pandas.Series`
input series
n: :obj:`int`, default 5
Return this many ascending sorted values.
keep: :obj:`str`, default 'first'
When there are duplicate values that cannot all fit in a Series of n elements:
first : return the first n occurrences in order of appearance.
last : return the last n occurrences in reverse order of appearance.
all : keep all occurrences. This can result in a Series of size larger than n.
*unsupported*
Returns
-------
:obj:`series`
returns :obj:`series`
"""
_func_name = 'Method nlargest().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object\n given: {}\n expected: {}'.format(_func_name, self, 'series'))
if not isinstance(n, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object n\n given: {}\n expected: {}'.format(_func_name, n, 'int'))
if not isinstance(keep, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} The object keep\n given: {}\n expected: {}'.format(_func_name, keep, 'str'))
def hpat_pandas_series_nlargest_impl(self, n=5, keep='first'):
if keep != 'first':
raise ValueError("Method nlargest(). Unsupported parameter. Given 'keep' != 'first'")
# data: [0, 1, -1, 1, 0] -> [1, 1, 0, 0, -1]
# index: [0, 1, 2, 3, 4] -> [1, 3, 0, 4, 2] (not [3, 1, 4, 0, 2])
# subtract 1 to ensure reverse ordering at boundaries
indices = (-self._data - 1).argsort(kind='mergesort')[:max(n, 0)]
return self.take(indices)
return hpat_pandas_series_nlargest_impl
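

# Hedged usage sketch (illustrative, not part of SDC itself): with SDC imported,
# nlargest/nsmallest become available inside Numba-compiled functions, e.g.
#
#   from numba import njit
#   import pandas as pd
#   import sdc  # noqa: F401  (registers the overloads in this module)
#
#   @njit
#   def top_two(a):
#       return pd.Series(a).nlargest(2)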
@sdc_overload_attribute(SeriesType, 'shape')
def hpat_pandas_series_shape(self):
"""
Pandas Series attribute :attr:`pandas.Series.shape` implementation
**Algorithm**: result = series.shape
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shape1
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:obj:`tuple`
a tuple of the shape of the underlying data
"""
_func_name = 'Attribute shape.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_shape_impl(self):
return self._data.shape
return hpat_pandas_series_shape_impl
@sdc_overload_method(SeriesType, 'std')
def hpat_pandas_series_std(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.std` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_std_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method std().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_std_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
var = self.var(axis=axis, skipna=skipna, level=level, ddof=ddof, numeric_only=numeric_only)
return var ** 0.5
return hpat_pandas_series_std_impl
@sdc_overload_attribute(SeriesType, 'values')
def hpat_pandas_series_values(self):
"""
Pandas Series attribute 'values' implementation.
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.values.html#pandas.Series.values
Algorithm: result = series.values
Where:
series: pandas.series
result: pandas.series as ndarray
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_values
"""
_func_name = 'Attribute values.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_values_impl(self):
return self._data
return hpat_pandas_series_values_impl
@sdc_overload_method(SeriesType, 'value_counts')
def hpat_pandas_series_value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.value_counts
Examples
--------
.. literalinclude:: ../../../examples/series/series_value_counts.py
:language: python
:lines: 27-
:caption: Getting the number of values excluding NaNs
:name: ex_series_value_counts
.. command-output:: python ./series/series_value_counts.py
:cwd: ../../../examples
.. note::
Parameter bins and dropna for Strings are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.count <pandas.Series.count>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.value_counts` implementation.
Note: Elements with the same count might appear in result in a different order than in Pandas
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_value_counts*
Parameters
-----------
self: :obj:`pandas.Series`
input series
normalize: :obj:`boolean`, default False
If True then the object returned will contain the relative frequencies of the unique values
sort: :obj: `boolean`, default True
Sort by frequencies
ascending: :obj:`boolean`, default False
Sort in ascending order
bins: :obj:`integer`, default None
*unsupported*
dropna: :obj:`boolean`, default True
Skip counts of NaN
Returns
-------
:returns :obj:`pandas.Series`
"""
_func_name = 'Method value_counts().'
ty_checker = TypeChecker('Method value_counts().')
ty_checker.check(self, SeriesType)
if not isinstance(normalize, (types.Omitted, types.Boolean, bool)) and normalize is True:
ty_checker.raise_exc(normalize, 'boolean', 'normalize')
if not isinstance(sort, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(sort, 'boolean', 'sort')
if not isinstance(ascending, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(ascending, 'boolean', 'ascending')
if not isinstance(bins, (types.Omitted, types.NoneType)) and bins is not None:
ty_checker.raise_exc(bins, 'boolean', 'bins')
if not isinstance(dropna, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(dropna, 'boolean', 'dropna')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_value_counts_str_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=types.unicode_type,
value_type=types.intp
)
nan_counts = 0
for i, value in enumerate(self._data):
if str_arr_is_na(self._data, i):
if not dropna:
nan_counts += 1
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
need_add_nan_count = not dropna and nan_counts
values = [key for key in value_counts_dict]
counts_as_list = [value_counts_dict[key] for key in value_counts_dict.keys()]
values_len = len(values)
if need_add_nan_count:
# append a separate empty string for NaN elements
values_len += 1
values.append('')
counts_as_list.append(nan_counts)
counts = numpy.asarray(counts_as_list, dtype=numpy.intp)
indexes_order = numpy.arange(values_len)
if sort:
indexes_order = counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
counts_sorted = numpy.take(counts, indexes_order)
values_sorted_by_count = [values[i] for i in indexes_order]
# allocate the result index as a StringArray and copy values to it
index_string_lengths = numpy.asarray([len(s) for s in values_sorted_by_count])
index_total_chars = numpy.sum(index_string_lengths)
result_index = pre_alloc_string_array(len(values_sorted_by_count), index_total_chars)
cp_str_list_to_array(result_index, values_sorted_by_count)
if need_add_nan_count:
# set null bit for StringArray element corresponding to NaN element (was added as last in values)
index_previous_nan_pos = values_len - 1
for i in numpy.arange(values_len):
if indexes_order[i] == index_previous_nan_pos:
str_arr_set_na(result_index, i)
break
return pandas.Series(counts_sorted, index=result_index, name=self._name)
return hpat_pandas_series_value_counts_str_impl
elif isinstance(self.dtype, types.Number):
series_dtype = self.dtype
def hpat_pandas_series_value_counts_number_impl(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True):
value_counts_dict = Dict.empty(
key_type=series_dtype,
value_type=types.intp
)
zero_counts = 0
is_zero_found = False
for value in self._data:
if (dropna and numpy.isnan(value)):
continue
# Pandas hash-based value_count_float64 function doesn't distinguish between
# positive and negative zeros, hence we count zero values separately and store
# as a key the first zero value found in the Series
if not value:
zero_counts += 1
if not is_zero_found:
zero_value = value
is_zero_found = True
continue
value_counts_dict[value] = value_counts_dict.get(value, 0) + 1
if zero_counts:
value_counts_dict[zero_value] = zero_counts
unique_values = numpy.asarray(
list(value_counts_dict),
dtype=self._data.dtype
)
value_counts = numpy.asarray(
[value_counts_dict[key] for key in value_counts_dict],
dtype=numpy.intp
)
indexes_order = numpy.arange(len(value_counts))
if sort:
indexes_order = value_counts.argsort()
if not ascending:
indexes_order = indexes_order[::-1]
sorted_unique_values = numpy.take(unique_values, indexes_order)
sorted_value_counts = numpy.take(value_counts, indexes_order)
return pandas.Series(sorted_value_counts, index=sorted_unique_values, name=self._name)
return hpat_pandas_series_value_counts_number_impl
return None
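

# Hedged usage sketch (illustrative, not executed here): inside a jitted function
# value_counts() returns a Series of counts indexed by the unique values, e.g.
#
#   @njit
#   def count_values():
#       s = pd.Series([1.0, 2.0, 1.0, 1.0])
#       return s.value_counts()   # index [1.0, 2.0], values [3, 1]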
@sdc_overload_method(SeriesType, 'var')
def hpat_pandas_series_var(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.var` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_var_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
level: :obj:`int`, :obj:`str`
If the axis is a MultiIndex (hierarchical),
count along a particular level, collapsing into a scalar
*unsupported*
ddof: :obj:`int`
Delta Degrees of Freedom.
The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only: :obj:`bool`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
Returns
-------
:obj:`scalar`
returns :obj:`scalar`
"""
_func_name = 'Method var().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError('{} The object must be a boolean. Given skipna: {}'.format(_func_name, skipna))
if not isinstance(ddof, (types.Omitted, int, types.Integer)):
raise TypingError('{} The object must be an integer. Given ddof: {}'.format(_func_name, ddof))
for name, arg in [('axis', axis), ('level', level), ('numeric_only', numeric_only)]:
if not isinstance(arg, (types.Omitted, types.NoneType)) and arg is not None:
raise TypingError('{} Unsupported parameters. Given {}: {}'.format(_func_name, name, arg))
def hpat_pandas_series_var_impl(self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None):
if skipna is None:
skipna = True
if skipna:
valuable_length = len(self._data) - numpy.sum(numpy.isnan(self._data))
if valuable_length <= ddof:
return numpy.nan
return numpy.nanvar(self._data) * valuable_length / (valuable_length - ddof)
if len(self._data) <= ddof:
return numpy.nan
return self._data.var() * len(self._data) / (len(self._data) - ddof)
return hpat_pandas_series_var_impl
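

# Note on the implementation above (illustrative): with skipna=True the estimator
# is obtained from numpy's population variance by rescaling,
#     var_ddof = numpy.nanvar(x) * n_valid / (n_valid - ddof),
# which reduces to the usual unbiased sample variance for ddof=1.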
@sdc_overload_attribute(SeriesType, 'index')
def hpat_pandas_series_index(self):
"""
Pandas Series attribute :attr:`pandas.Series.index` implementation
**Algorithm**: result = series.index
**Test**: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index1
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_index2
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
the index of the Series
"""
_func_name = 'Attribute index.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_index_none_impl(self):
return numpy.arange(len(self._data))
return hpat_pandas_series_index_none_impl
else:
def hpat_pandas_series_index_impl(self):
return self._index
return hpat_pandas_series_index_impl
@sdc_overload_method(SeriesType, 'rolling')
def hpat_pandas_series_rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.rolling
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. todo:: Add support of parameters ``center``, ``win_type``, ``on``, ``axis`` and ``closed``
.. seealso::
:ref:`expanding <pandas.Series.expanding>`
Provides expanding transformations.
:ref:`ewm <pandas.Series.ewm>`
Provides exponential weighted functions.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series attribute :attr:`pandas.Series.rolling` implementation
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling
Parameters
----------
series: :obj:`pandas.Series`
Input Series.
window: :obj:`int` or :obj:`offset`
Size of the moving window.
min_periods: :obj:`int`
Minimum number of observations in window required to have a value.
center: :obj:`bool`
Set the labels at the center of the window.
*unsupported*
win_type: :obj:`str`
Provide a window type.
*unsupported*
on: :obj:`str`
Column on which to calculate the rolling window.
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
closed: :obj:`str`
Make the interval closed on the ‘right’, ‘left’, ‘both’ or ‘neither’ endpoints.
*unsupported*
Returns
-------
:class:`pandas.Series.rolling`
Output class to manipulate with input data.
"""
ty_checker = TypeChecker('Method rolling().')
ty_checker.check(self, SeriesType)
if not isinstance(window, types.Integer):
ty_checker.raise_exc(window, 'int', 'window')
minp_accepted = (types.Omitted, types.NoneType, types.Integer)
if not isinstance(min_periods, minp_accepted) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'None, int', 'min_periods')
center_accepted = (types.Omitted, types.Boolean)
if not isinstance(center, center_accepted) and center is not False:
ty_checker.raise_exc(center, 'bool', 'center')
str_types = (types.Omitted, types.NoneType, types.StringLiteral, types.UnicodeType)
if not isinstance(win_type, str_types) and win_type is not None:
ty_checker.raise_exc(win_type, 'str', 'win_type')
if not isinstance(on, str_types) and on is not None:
ty_checker.raise_exc(on, 'str', 'on')
axis_accepted = (types.Omitted, types.Integer, types.StringLiteral, types.UnicodeType)
if not isinstance(axis, axis_accepted) and axis != 0:
ty_checker.raise_exc(axis, 'int, str', 'axis')
if not isinstance(closed, str_types) and closed is not None:
ty_checker.raise_exc(closed, 'str', 'closed')
nan_minp = isinstance(min_periods, (types.Omitted, types.NoneType)) or min_periods is None
def hpat_pandas_series_rolling_impl(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
if window < 0:
raise ValueError('window must be non-negative')
if nan_minp == True: # noqa
minp = window
else:
minp = min_periods
if minp < 0:
raise ValueError('min_periods must be >= 0')
if minp > window:
raise ValueError('min_periods must be <= window')
if center != False: # noqa
raise ValueError('Method rolling(). The object center\n expected: False')
if win_type is not None:
raise ValueError('Method rolling(). The object win_type\n expected: None')
if on is not None:
raise ValueError('Method rolling(). The object on\n expected: None')
if axis != 0:
raise ValueError('Method rolling(). The object axis\n expected: 0')
if closed is not None:
raise ValueError('Method rolling(). The object closed\n expected: None')
return _hpat_pandas_series_rolling_init(self, window, minp, center,
win_type, on, axis, closed)
return hpat_pandas_series_rolling_impl
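

# Hedged usage sketch (illustrative, mirrors the series_rolling_min.py example
# referenced in the docstring above; not executed here):
#
#   @njit
#   def rolling_min_demo():
#       s = pd.Series([4., 3., 5., 2., 6.])
#       return s.rolling(3).min()   # first two results are NaN (min_periods=3)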
@sdc_overload_attribute(SeriesType, 'size')
def hpat_pandas_series_size(self):
"""
Pandas Series attribute :attr:`pandas.Series.size` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_size
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.Series`
Return the number of elements in the underlying data.
"""
_func_name = 'Attribute size.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_size_impl(self):
return len(self._data)
return hpat_pandas_series_size_impl
@sdc_overload_attribute(SeriesType, 'str')
def hpat_pandas_series_str(self):
"""
Pandas Series attribute :attr:`pandas.Series.str` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_hiframes.TestHiFrames.test_str_get
Parameters
----------
series: :obj:`pandas.Series`
input series
Returns
-------
:class:`pandas.core.strings.StringMethods`
Output class to manipulate with input data.
"""
_func_name = 'Attribute str.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.List, types.UnicodeType)):
msg = '{} Can only use .str accessor with string values. Given: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
def hpat_pandas_series_str_impl(self):
return pandas.core.strings.StringMethods(self)
return hpat_pandas_series_str_impl
@sdc_overload_attribute(SeriesType, 'ndim')
def hpat_pandas_series_ndim(self):
"""
Pandas Series attribute :attr:`pandas.Series.ndim` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_ndim
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`int`
Number of dimensions of the underlying data, by definition 1
"""
_func_name = 'Attribute ndim.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_ndim_impl(self):
return 1
return hpat_pandas_series_ndim_impl
@sdc_overload_attribute(SeriesType, 'T')
def hpat_pandas_series_T(self):
"""
Pandas Series attribute :attr:`pandas.Series.T` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_getattr_T
Parameters
----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`numpy.ndarray`
An array representing the underlying data
"""
_func_name = 'Attribute T.'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_T_impl(self):
return self._data
return hpat_pandas_series_T_impl
@sdc_overload(len)
def hpat_pandas_series_len(self):
"""
Pandas Series operator :func:`len` implementation
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_len
Parameters
----------
series: :class:`pandas.Series`
Returns
-------
:obj:`int`
number of items in the object
"""
_func_name = 'Operator len().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
def hpat_pandas_series_len_impl(self):
return len(self._data)
return hpat_pandas_series_len_impl
@sdc_overload_method(SeriesType, 'astype')
def hpat_pandas_series_astype(self, dtype, copy=True, errors='raise'):
"""
Pandas Series method :meth:`pandas.Series.astype` implementation.
Cast a pandas object to a specified dtype dtype
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_astype*
Parameters
-----------
dtype : :obj:`numpy.dtype` or :obj:`dict`
Use a numpy.dtype or Python type to cast entire pandas object to the same type.
Alternatively, use {col: dtype, …}, where col is a column label and dtype is a numpy.dtype
or Python type to cast one or more of the DataFrame’s columns to column-specific types.
copy : :obj:`bool`, default :obj:`True`
Return a copy when True
Currently copy=False is not supported
errors : :obj:`str`, default :obj:`'raise'`
Control raising of exceptions on invalid data for provided dtype.
* raise : allow exceptions to be raised
* ignore : suppress exceptions. On error return original object
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` Cast a :obj:`pandas.Series` to a specified dtype dtype
"""
_func_name = 'Method astype().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(copy, (types.Omitted, bool, types.Boolean)):
raise TypingError('{} The object must be a boolean. Given copy: {}'.format(_func_name, copy))
if (not isinstance(errors, (types.Omitted, str, types.UnicodeType, types.StringLiteral)) and
errors in ('raise', 'ignore')):
raise TypingError('{} The object must be a string literal. Given errors: {}'.format(_func_name, errors))
# Return StringArray for astype(str) or astype('str')
def hpat_pandas_series_astype_to_str_impl(self, dtype, copy=True, errors='raise'):
num_chars = 0
arr_len = len(self._data)
# Get total chars for new array
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
num_chars += len(str(item)) # TODO: check NA
data = sdc.str_arr_ext.pre_alloc_string_array(arr_len, num_chars)
for i in numba.parfor.internal_prange(arr_len):
item = self._data[i]
data[i] = str(item) # TODO: check NA
return pandas.Series(data, self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.functions.NumberClass), example - astype(np.int64)
def hpat_pandas_series_astype_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(dtype), self._index, self._name)
# Return npytypes.Array from npytypes.Array for astype(types.StringLiteral), example - astype('int64')
def hpat_pandas_series_astype_literal_type_numba_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data.astype(numpy.dtype(dtype)), self._index, self._name)
# Return self
def hpat_pandas_series_astype_no_modify_impl(self, dtype, copy=True, errors='raise'):
return pandas.Series(self._data, self._index, self._name)
if ((isinstance(dtype, types.Function) and dtype.typing_key == str)
or (isinstance(dtype, types.StringLiteral) and dtype.literal_value == 'str')):
return hpat_pandas_series_astype_to_str_impl
# Needs Numba astype impl support converting unicode_type to NumberClass and other types
if isinstance(self.data, StringArrayType):
if isinstance(dtype, types.functions.NumberClass) and errors == 'raise':
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype}')
if isinstance(dtype, types.StringLiteral) and errors == 'raise':
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
raise TypingError(f'Needs Numba astype impl support converting unicode_type to {dtype.literal_value}')
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.functions.NumberClass):
return hpat_pandas_series_astype_numba_impl
if isinstance(self.data, types.npytypes.Array) and isinstance(dtype, types.StringLiteral):
try:
literal_value = numpy.dtype(dtype.literal_value)
except:
pass # Will raise the exception later
else:
return hpat_pandas_series_astype_literal_type_numba_impl
# Raise error if dtype is not supported
if errors == 'raise':
raise TypingError(f'{_func_name} The object must be a supported type. Given dtype: {dtype}')
else:
return hpat_pandas_series_astype_no_modify_impl
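# Illustrative sketch (not part of the SDC implementation): the overload above
# dispatches on the requested dtype. The hypothetical helper below shows, with
# plain pandas, the three conversion forms the specialized implementations cover.
def _example_series_astype_sketch():
    import numpy as np
    import pandas as pd

    s = pd.Series([1, 2, 3])
    as_str = s.astype(str)          # string target, like the *_to_str_impl path
    as_np = s.astype(np.float64)    # numpy dtype object, like the *_numba_impl path
    as_lit = s.astype('float64')    # dtype given as a string literal
    return as_str, as_np, as_lit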
@sdc_overload_method(SeriesType, 'shift')
def hpat_pandas_series_shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Pandas Series method :meth:`pandas.Series.shift` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_fill_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_shift_unsupported_params
Parameters
----------
self: :obj:`pandas.Series`
input series
periods: :obj:`int`
Number of periods to shift. Can be positive or negative.
freq: :obj:`DateOffset`, :obj:`tseries.offsets`, :obj:`timedelta`, :obj:`str`
Offset to use from the tseries module or time rule (e.g. ‘EOM’).
*unsupported*
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
fill_value : :obj:`int`, :obj:`float`
The scalar value to use for newly introduced missing values.
Returns
-------
:obj:`scalar`
returns :obj:`series` object
"""
_func_name = 'Method shift().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(fill_value, (types.Omitted, types.Number, types.NoneType)) and fill_value is not None:
raise TypingError('{} The object must be a number. Given fill_value: {}'.format(_func_name, fill_value))
if not isinstance(freq, (types.Omitted, types.NoneType)) and freq is not None:
raise TypingError('{} Unsupported parameters. Given freq: {}'.format(_func_name, freq))
if not isinstance(axis, (types.Omitted, int, types.Integer)) and axis != 0:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
fill_is_default = isinstance(fill_value, (types.Omitted, types.NoneType)) or fill_value is None
series_np_dtype = [numpy_support.as_dtype(self.data.dtype)]
fill_np_dtype = [numpy.float64 if fill_is_default else numpy_support.as_dtype(fill_value)]
fill_dtype = types.float64 if fill_is_default else fill_value
common_dtype = find_common_dtype_from_numpy_dtypes([], [self.data.dtype, fill_dtype])
if fill_is_default:
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(shape=len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = numpy.nan
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = numpy.nan
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
def hpat_pandas_series_shift_impl(self, periods=1, freq=None, axis=0, fill_value=None):
if axis != 0:
raise TypingError('Method shift(). Unsupported parameters. Given axis != 0')
arr = numpy.empty(len(self._data), dtype=common_dtype)
if periods > 0:
arr[:periods] = fill_value
arr[periods:] = self._data[:-periods]
elif periods < 0:
arr[periods:] = fill_value
arr[:periods] = self._data[-periods:]
else:
arr[:] = self._data
return pandas.Series(data=arr, index=self._index, name=self._name)
return hpat_pandas_series_shift_impl
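# Illustrative sketch (not part of the SDC implementation): the shifted array is
# built exactly as in hpat_pandas_series_shift_impl - a freshly allocated buffer
# whose leading (or trailing) `periods` slots receive NaN or the fill value.
def _example_series_shift_sketch():
    import numpy as np

    data = np.array([1.0, 2.0, 3.0, 4.0])
    periods, fill_value = 2, 0.0
    arr = np.empty(len(data), dtype=np.float64)
    if periods > 0:
        arr[:periods] = fill_value
        arr[periods:] = data[:-periods]
    elif periods < 0:
        arr[periods:] = fill_value
        arr[:periods] = data[-periods:]
    else:
        arr[:] = data
    return arr  # array([0., 0., 1., 2.])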
@sdc_overload_method(SeriesType, 'isin')
def hpat_pandas_series_isin(self, values):
"""
Pandas Series method :meth:`pandas.Series.isin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isin_list1
Parameters
-----------
values : :obj:`list` or :obj:`set` object
specifies values to look for in the series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object indicating if each element of self is in values
"""
_func_name = 'Method isin().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(values, (types.Set, types.List)):
raise TypingError(
'{} The argument must be set or list-like object. Given values: {}'.format(_func_name, values))
def hpat_pandas_series_isin_impl(self, values):
# TODO: replace with below line when Numba supports np.isin in nopython mode
# return pandas.Series(np.isin(self._data, values))
return pandas.Series(data=[(x in values) for x in self._data], index=self._index, name=self._name)
return hpat_pandas_series_isin_impl
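# Illustrative sketch (not part of the SDC implementation): until numpy.isin is
# usable in nopython mode, the overload above falls back to a per-element
# membership test, equivalent to this hypothetical plain-Python helper.
def _example_series_isin_sketch():
    import pandas as pd

    s = pd.Series([1, 2, 3, 4])
    values = {2, 4}
    return pd.Series([x in values for x in s.values], index=s.index, name=s.name)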
@sdc_overload_method(SeriesType, 'append')
def hpat_pandas_series_append(self, to_append, ignore_index=False, verify_integrity=False):
"""
Pandas Series method :meth:`pandas.Series.append` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_append*
Parameters
-----------
self: :obj:`pandas.Series`
input series
to_append : :obj:`pandas.Series` object or :obj:`list` or :obj:`set`
Series (or list or tuple of Series) to append with self
ignore_index: :obj:`bool`, default False
If True, do not use the index labels.
Supported as literal value only
verify_integrity: :obj:`bool`, default False
If True, raise Exception on creating index with duplicates.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
Concatenated Series
"""
_func_name = 'Method append().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not (isinstance(to_append, SeriesType)
or (isinstance(to_append, (types.UniTuple, types.List)) and isinstance(to_append.dtype, SeriesType))):
raise TypingError(
'{} The argument must be a pandas.series or list/tuple of pandas.series. \
Given to_append: {}'.format(_func_name, to_append))
# currently we will always raise this in the end, i.e. if no impl was found
# TODO: find a way to stop compilation early and not proceed with unliteral step
if not (isinstance(ignore_index, types.Literal) and isinstance(ignore_index, types.Boolean)
or isinstance(ignore_index, types.Omitted)
or ignore_index is False):
raise TypingError(
'{} The ignore_index must be a literal Boolean constant. Given: {}'.format(_func_name, ignore_index))
if not (verify_integrity is False or isinstance(verify_integrity, types.Omitted)):
raise TypingError(
'{} Unsupported parameters. Given verify_integrity: {}'.format(_func_name, verify_integrity))
# ignore_index value has to be known at compile time to select between implementations with different signatures
ignore_index_is_false = (common_functions.has_literal_value(ignore_index, False)
or common_functions.has_python_value(ignore_index, False)
or isinstance(ignore_index, types.Omitted))
to_append_is_series = isinstance(to_append, SeriesType)
if ignore_index_is_false:
def hpat_pandas_series_append_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
new_index = common_functions.hpat_arrays_append(self.index, to_append.index)
else:
data_arrays_to_append = [series._data for series in to_append]
index_arrays_to_append = [series.index for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, data_arrays_to_append)
new_index = common_functions.hpat_arrays_append(self.index, index_arrays_to_append)
return pandas.Series(new_data, new_index)
return hpat_pandas_series_append_impl
else:
def hpat_pandas_series_append_ignore_index_impl(self, to_append, ignore_index=False, verify_integrity=False):
if to_append_is_series == True: # noqa
new_data = common_functions.hpat_arrays_append(self._data, to_append._data)
else:
arrays_to_append = [series._data for series in to_append]
new_data = common_functions.hpat_arrays_append(self._data, arrays_to_append)
return pandas.Series(new_data, None)
return hpat_pandas_series_append_ignore_index_impl
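# Illustrative sketch (not part of the SDC implementation): the two branches above
# differ only in whether the original indexes are concatenated (ignore_index=False)
# or dropped in favour of a fresh default index (ignore_index=True). Shown here
# with pandas.concat, which has the same semantics in plain pandas.
def _example_series_append_sketch():
    import pandas as pd

    s1 = pd.Series([1, 2], index=[10, 11])
    s2 = pd.Series([3, 4], index=[12, 13])
    kept = pd.concat([s1, s2])                        # index: 10, 11, 12, 13
    dropped = pd.concat([s1, s2], ignore_index=True)  # fresh index: 0, 1, 2, 3
    return kept, dropped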
@sdc_overload_method(SeriesType, 'copy')
def hpat_pandas_series_copy(self, deep=True):
"""
Pandas Series method :meth:`pandas.Series.copy` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_str1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_int1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_copy_deep
Parameters
-----------
self: :class:`pandas.Series`
input arg
deep: :obj:`bool`, default :obj:`True`
Make a deep copy, including a copy of the data and the indices.
With deep=False neither the indices nor the data are copied.
[SDC limitations]:
- deep=False: shallow copy of index is not supported
Returns
-------
:obj:`pandas.Series` or :obj:`pandas.DataFrame`
Object type matches caller.
"""
ty_checker = TypeChecker('Method Series.copy().')
ty_checker.check(self, SeriesType)
if not isinstance(deep, (types.Omitted, types.Boolean)) and not deep:
ty_checker.raise_exc(self.data, 'boolean', 'deep')
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), name=self._name)
else:
return pandas.Series(data=self._data, name=self._name)
return hpat_pandas_series_copy_impl
else:
def hpat_pandas_series_copy_impl(self, deep=True):
if deep:
return pandas.Series(data=self._data.copy(), index=self._index.copy(), name=self._name)
else:
# Shallow copy of index is not supported yet
return pandas.Series(data=self._data, index=self._index.copy(), name=self._name)
return hpat_pandas_series_copy_impl
@sdc_overload_method(SeriesType, 'corr')
def hpat_pandas_series_corr(self, other, method='pearson', min_periods=None):
"""
Pandas Series method :meth:`pandas.Series.corr` implementation.
Note: Unsupported mixed numeric and string data
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_corr_unsupported_period
Parameters
----------
self: :obj:`pandas.Series`
input series
other: :obj:`pandas.Series`
input series
method:
*unsupported*
min_periods: :obj:`int`, default None
Returns
-------
:obj:`float`
returns :obj:`float` object
"""
ty_checker = TypeChecker('Method corr().')
ty_checker.check(self, SeriesType)
ty_checker.check(other, SeriesType)
if not isinstance(self.data.dtype, types.Number):
ty_checker.raise_exc(self.data, 'number', 'self.data')
if not isinstance(other.data.dtype, types.Number):
ty_checker.raise_exc(other.data, 'number', 'other.data')
if not isinstance(min_periods, (int, types.Integer, types.Omitted, types.NoneType)) and min_periods is not None:
ty_checker.raise_exc(min_periods, 'int64', 'min_periods')
def hpat_pandas_series_corr_impl(self, other, method='pearson', min_periods=None):
if min_periods is None:
min_periods = 1
if len(self._data) == 0 or len(other._data) == 0:
return numpy.nan
self_arr = self._data[:min(len(self._data), len(other._data))]
other_arr = other._data[:min(len(self._data), len(other._data))]
invalid = numpy.isnan(self_arr) | numpy.isnan(other_arr)
if invalid.any():
self_arr = self_arr[~invalid]
other_arr = other_arr[~invalid]
if len(self_arr) < min_periods:
return numpy.nan
new_self = pandas.Series(self_arr)
new_other = pandas.Series(other_arr)
n = new_self.count()
ma = new_self.sum()
mb = new_other.sum()
a = n * (self_arr * other_arr).sum() - ma * mb
b1 = n * (self_arr * self_arr).sum() - ma * ma
b2 = n * (other_arr * other_arr).sum() - mb * mb
if b1 == 0 or b2 == 0:
return numpy.nan
return a / numpy.sqrt(b1 * b2)
return hpat_pandas_series_corr_impl
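# Illustrative sketch (not part of the SDC implementation): the closed form used in
# hpat_pandas_series_corr_impl is the textbook Pearson estimate
#   r = (n*sum(x*y) - sum(x)*sum(y)) /
#       sqrt((n*sum(x*x) - sum(x)**2) * (n*sum(y*y) - sum(y)**2))
# which agrees with numpy.corrcoef on NaN-free data.
def _example_series_corr_sketch():
    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([2.0, 4.0, 6.0, 8.5])
    n = len(x)
    a = n * (x * y).sum() - x.sum() * y.sum()
    b1 = n * (x * x).sum() - x.sum() ** 2
    b2 = n * (y * y).sum() - y.sum() ** 2
    r = a / np.sqrt(b1 * b2)
    assert np.isclose(r, np.corrcoef(x, y)[0, 1])
    return r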
@sdc_overload_method(SeriesType, 'head')
def hpat_pandas_series_head(self, n=5):
"""
Pandas Series method :meth:`pandas.Series.head` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_head*
Parameters
-----------
n: :obj:`int`, default 5
input argument, default 5
Returns
-------
:obj:`pandas.Series`
returns: The first n rows of the caller object.
"""
_func_name = 'Method head().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(n, (types.Integer, types.Omitted)) and n != 5:
raise TypingError('{} The parameter must be an integer type. Given type n: {}'.format(_func_name, n))
if isinstance(self.index, types.NoneType):
def hpat_pandas_series_head_impl(self, n=5):
return pandas.Series(data=self._data[:n], name=self._name)
return hpat_pandas_series_head_impl
else:
def hpat_pandas_series_head_index_impl(self, n=5):
return pandas.Series(data=self._data[:n], index=self._index[:n], name=self._name)
return hpat_pandas_series_head_index_impl
@sdc_overload_method(SeriesType, 'groupby')
def hpat_pandas_series_groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
"""
Pandas Series method :meth:`pandas.Series.groupby` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_groupby_count
Parameters
-----------
self: :class:`pandas.Series`
input arg
by: :obj:`pandas.Series` object
Used to determine the groups for the groupby
axis:
*unsupported*
level:
*unsupported*
as_index:
*unsupported*
sort:
*unsupported*
group_keys:
*unsupported*
squeeze:
*unsupported*
observed:
*unsupported*
Returns
-------
:obj:`pandas.SeriesGroupBy`
returns :obj:`pandas.SeriesGroupBy` object
"""
_func_name = 'Method Series.groupby().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if by is None and axis is None:
raise TypingError("{} You have to supply one of 'by' or 'axis' parameters".format(_func_name))
if level is not None and not isinstance(level, (types.Integer, types.NoneType, types.Omitted)):
raise TypingError("{} 'level' must be an Integer. Given: {}".format(_func_name, level))
def hpat_pandas_series_groupby_impl(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False):
# TODO Needs to implement parameters value check
# if level is not None and (level < -1 or level > 0):
# raise ValueError("Method Series.groupby(). level > 0 or level < -1 only valid with MultiIndex")
return pandas.core.groupby.SeriesGroupBy(self)
return hpat_pandas_series_groupby_impl
@sdc_overload_method(SeriesType, 'isnull')
@sdc_overload_method(SeriesType, 'isna')
def hpat_pandas_series_isna(self):
"""
Pandas Series method :meth:`pandas.Series.isna` and :meth:`pandas.Series.isnull` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isnull1
Parameters
-----------
self : :obj:`pandas.Series` object
input argument
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method isna/isnull().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if isinstance(self.data.dtype, (types.Integer, types.Float)):
def hpat_pandas_series_isna_impl(self):
return pandas.Series(data=numpy.isnan(self._data), index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_isna_impl(self):
result = numpy.empty(len(self._data), numpy.bool_)
byte_size = 8
# iterate over bits in StringArrayType null_bitmap and fill array indicating if array's element are NaN
for i in range(len(self._data)):
bmap_idx = i // byte_size
bit_idx = i % byte_size
bmap = self._data.null_bitmap[bmap_idx]
bit_value = (bmap >> bit_idx) & 1
result[i] = bit_value == 0
return pandas.Series(result, index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
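# Illustrative sketch (not part of the SDC implementation): for string data the
# overload above reads Arrow-style validity bits - bit i of the null bitmap is 0
# when element i is missing. The hypothetical helper below decodes such a bitmap.
def _example_null_bitmap_sketch():
    import numpy as np

    null_bitmap = bytes([0b00000101])  # elements 0 and 2 valid, element 1 missing
    n_elements = 3
    byte_size = 8
    result = np.empty(n_elements, np.bool_)
    for i in range(n_elements):
        bmap = null_bitmap[i // byte_size]
        bit_value = (bmap >> (i % byte_size)) & 1
        result[i] = bit_value == 0  # True means "is NA"
    return result  # array([False,  True, False])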
@sdc_overload_method(SeriesType, 'notna')
def hpat_pandas_series_notna(self):
"""
Pandas Series method :meth:`pandas.Series.notna` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_notna*
Parameters
-----------
self : :obj:`pandas.Series` object
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method notna().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.data.dtype, types.Number):
def hpat_pandas_series_notna_impl(self):
return pandas.Series(numpy.invert(numpy.isnan(self._data)), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_notna_impl(self):
result = self.isna()
return pandas.Series(numpy.invert(result._data), index=self._index, name=self._name)
return hpat_pandas_series_notna_impl
@sdc_overload_method(SeriesType, 'ne')
def hpat_pandas_series_ne(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ne` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ne().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data != other._data)
return hpat_pandas_series_ne_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ne_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data != other)
return hpat_pandas_series_ne_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'add')
def hpat_pandas_series_add(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.add
Examples
--------
.. literalinclude:: ../../../examples/series/series_add.py
:language: python
:lines: 27-
:caption: Getting the addition of Series and other
:name: ex_series_add
.. command-output:: python ./series/series_add.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.radd <pandas.Series.radd>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.add` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method add().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_add_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data + other._data)
return hpat_pandas_series_add_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_add_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method add(). The object axis\n expected: 0')
return pandas.Series(self._data + other)
return hpat_pandas_series_add_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'sub')
def hpat_pandas_series_sub(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.sub` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method sub().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_sub_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data - other._data)
return hpat_pandas_series_sub_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_sub_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data - other)
return hpat_pandas_series_sub_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'sum')
def hpat_pandas_series_sum(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Pandas Series method :meth:`pandas.Series.sum` implementation.
.. only:: developer
Tests:
python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_sum*
Parameters
----------
self: :class:`pandas.Series`
input series
axis:
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude NA/null values when computing the result.
level:
*unsupported*
numeric_only:
*unsupported*
min_count:
*unsupported*
Returns
-------
:obj:`float`
scalar or Series (if level specified)
"""
_func_name = 'Method sum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(axis, (types.Integer, types.Omitted)) or axis is None):
raise TypingError('{} The axis must be an Integer. Currently unsupported. Given: {}'.format(_func_name, axis))
if not (isinstance(skipna, (types.Boolean, types.Omitted, types.NoneType)) or skipna is None):
raise TypingError('{} The skipna must be a Boolean. Given: {}'.format(_func_name, skipna))
if not (isinstance(level, (types.Integer, types.StringLiteral, types.Omitted, types.NoneType)) or level is None):
raise TypingError(
'{} The level must be an Integer or level name. Currently unsupported. Given: {}'.format(
_func_name, level))
if not (isinstance(numeric_only, (types.Boolean, types.Omitted)) or numeric_only is None):
raise TypingError(
'{} The numeric_only must be a Boolean. Currently unsupported. Given: {}'.format(
_func_name, numeric_only))
if not (isinstance(min_count, (types.Integer, types.Omitted)) or min_count == 0):
raise TypingError(
'{} The min_count must be an Integer. Currently unsupported. Given: {}'.format(
_func_name, min_count))
def hpat_pandas_series_sum_impl(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_sum1
"""
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nansum(self._data)
return numpy.sum(self._data)
return hpat_pandas_series_sum_impl
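# Illustrative sketch (not part of the SDC implementation): skipna=True (the
# default) maps to numpy.nansum and skipna=False to numpy.sum, so a single NaN
# only propagates into the result in the latter case.
def _example_series_sum_sketch():
    import numpy as np

    data = np.array([1.0, np.nan, 3.0])
    return np.nansum(data), np.sum(data)  # (4.0, nan)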
@sdc_overload_method(SeriesType, 'take')
def hpat_pandas_series_take(self, indices, axis=0, is_copy=False):
"""
Pandas Series method :meth:`pandas.Series.take` implementation.
.. only:: developer
Tests: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_default_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_int_unboxing
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_take_index_str_unboxing
Parameters
----------
self: :obj:`pandas.Series`
input series
indices: :obj:`array-like`
An array of ints indicating which positions to take
axis: {0 or `index`, 1 or `columns`, None}, default 0
The axis on which to select elements. 0 means that we are selecting rows,
1 means that we are selecting columns.
*unsupported*
is_copy: :obj:`bool`, default False
Whether to return a copy of the original object or not.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object containing the elements taken from the object
"""
ty_checker = TypeChecker('Method take().')
ty_checker.check(self, SeriesType)
if (not isinstance(axis, (int, types.Integer, str, types.UnicodeType, types.StringLiteral, types.Omitted))
and axis not in (0, 'index')):
ty_checker.raise_exc(axis, 'integer or string', 'axis')
if not isinstance(is_copy, (bool, types.Boolean, types.Omitted)) and is_copy is not False:
ty_checker.raise_exc(is_copy, 'boolean', 'is_copy')
if not isinstance(indices, (types.List, types.Array)):
ty_checker.raise_exc(indices, 'array-like', 'indices')
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_take_noindex_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
return pandas.Series(local_data, indices)
return hpat_pandas_series_take_noindex_impl
def hpat_pandas_series_take_impl(self, indices, axis=0, is_copy=False):
local_data = [self._data[i] for i in indices]
local_index = [self._index[i] for i in indices]
return pandas.Series(local_data, local_index)
return hpat_pandas_series_take_impl
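# Illustrative sketch (not part of the SDC implementation): with a default index
# the taken positions themselves become the new index, while an explicit index is
# subsetted alongside the data, matching the two implementations above.
def _example_series_take_sketch():
    import pandas as pd

    indices = [2, 0]
    no_index = pd.Series([10, 20, 30])
    with_index = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
    return no_index.take(indices), with_index.take(indices)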
@sdc_overload_method(SeriesType, 'idxmax')
def hpat_pandas_series_idxmax(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmax` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmax_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the maximum value.
"""
_func_name = 'Method idxmax().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmax_impl(self, axis=None, skipna=True):
return numpy.argmax(self._data)
return hpat_pandas_series_idxmax_impl
else:
def hpat_pandas_series_idxmax_index_impl(self, axis=None, skipna=True):
# numpy.nanargmax is not supported by Numba at this time
result = numpy.argmax(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmax_index_impl
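# Illustrative sketch (not part of the SDC implementation): without an explicit
# index the positional argmax is the answer; with one, that position is mapped
# through the index, which is what the two implementations above do.
def _example_series_idxmax_sketch():
    import numpy as np
    import pandas as pd

    data = np.array([3.0, 9.0, 1.0])
    index = np.array([10, 20, 30])
    pos = np.argmax(data)      # 1
    label = index[int(pos)]    # 20
    assert label == pd.Series(data, index).idxmax()
    return pos, label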
@sdc_overload_method(SeriesType, 'mul')
def hpat_pandas_series_mul(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.mul
Examples
--------
.. literalinclude:: ../../../examples/series/series_mul.py
:language: python
:lines: 27-
:caption: Element-wise multiplication of two Series
:name: ex_series_mul
.. command-output:: python ./series/series_mul.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rmul <pandas.Series.rmul>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.mul` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mul().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(level, types.Omitted) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(fill_value, types.Omitted) and fill_value is not None:
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not isinstance(axis, types.Omitted) and axis != 0:
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_mul_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other._data)
return hpat_pandas_series_mul_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mul_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method mul(). The object axis\n expected: 0')
return pandas.Series(self._data * other)
return hpat_pandas_series_mul_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'div')
def hpat_pandas_series_div(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.div
Examples
--------
.. literalinclude:: ../../../examples/series/series_div.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator div)
:name: ex_series_div
.. command-output:: python ./series/series_div.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rdiv <pandas.Series.rdiv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.div` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: :obj:`int` default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method div().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_div_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_div_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_div_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method div(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_div_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'truediv')
def hpat_pandas_series_truediv(self, other, level=None, fill_value=None, axis=0):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.truediv
Examples
--------
.. literalinclude:: ../../../examples/series/series_truediv.py
:language: python
:lines: 27-
:caption: Element-wise division of one Series by another (binary operator truediv)
:name: ex_series_truediv
.. command-output:: python ./series/series_truediv.py
:cwd: ../../../examples
.. note::
Parameters level, fill_value, axis are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.rtruediv <pandas.Series.rtruediv>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.truediv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method truediv().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not (isinstance(level, types.Omitted) or level is None):
ty_checker.raise_exc(level, 'None', 'level')
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
ty_checker.raise_exc(fill_value, 'None', 'fill_value')
if not (isinstance(axis, types.Omitted) or axis == 0):
ty_checker.raise_exc(axis, 'int', 'axis')
if isinstance(other, SeriesType):
def hpat_pandas_series_truediv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other._data)
return hpat_pandas_series_truediv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_truediv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
if axis != 0:
raise ValueError('Method truediv(). The object axis\n expected: 0')
return pandas.Series(self._data / other)
return hpat_pandas_series_truediv_number_impl
ty_checker.raise_exc(other, 'Series, int, float', 'other')
@sdc_overload_method(SeriesType, 'floordiv')
def hpat_pandas_series_floordiv(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.floordiv` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method floordiv().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_floordiv_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data // other._data)
return hpat_pandas_series_floordiv_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_floordiv_number_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data // other)
return hpat_pandas_series_floordiv_number_impl
raise TypingError('{} The object must be a pandas.series or scalar. Given other: {}'.format(_func_name, other))
@sdc_overload_method(SeriesType, 'pow')
def hpat_pandas_series_pow(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.pow` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method pow().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data ** other._data)
return hpat_pandas_series_pow_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_pow_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data ** other)
return hpat_pandas_series_pow_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'prod')
def hpat_pandas_series_prod(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
"""
Pandas Series method :meth:`pandas.Series.prod` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_prod*
Parameters
-----------
self: :obj:`pandas.Series`
input series
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default :obj:`True`
Exclude nan values when computing the result
level: :obj:`int`, :obj:`str`, default :obj:`None`
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default :obj:`None`
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data.
Not implemented for Series.
*unsupported*
min_count: :obj:`int`, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result will be NA.
*unsupported*
Returns
-------
:obj:
Returns scalar or Series (if level specified)
"""
_func_name = 'Method prod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError('{} Non numeric values unsupported. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is None or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, (types.Omitted, types.NoneType)) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, (types.Omitted, types.NoneType)) or numeric_only is None) \
or not (isinstance(min_count, (types.Omitted, types.Integer)) or min_count == 0):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}, min_count: {}'.format(
_func_name, axis, level, numeric_only, min_count))
def hpat_pandas_series_prod_impl(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanprod(self._data)
else:
return numpy.prod(self._data)
return hpat_pandas_series_prod_impl
@sdc_overload_method(SeriesType, 'quantile')
def hpat_pandas_series_quantile(self, q=0.5, interpolation='linear'):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.quantile
Examples
--------
.. literalinclude:: ../../../examples/series/series_quantile.py
:language: python
:lines: 27-
:caption: Computing quantile for the Series
:name: ex_series_quantile
.. command-output:: python ./series/series_quantile.py
:cwd: ../../../examples
.. note::
Parameter interpolation is currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
`numpy.percentile <https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html#numpy.percentile>`_
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.quantile` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile
python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_quantile_q_vector
Parameters
-----------
q: :obj:`float` or array-like object, default 0.5
the quantile(s) to compute
interpolation: 'linear', 'lower', 'higher', 'midpoint', 'nearest', default `linear`
*unsupported* by Numba
Returns
-------
:obj:`pandas.Series` or float
"""
_func_name = 'Method quantile().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(interpolation, types.Omitted) and interpolation != 'linear':
ty_checker.raise_exc(interpolation, 'str', 'interpolation')
if not isinstance(q, (int, float, list, types.Number, types.Omitted, types.List)):
ty_checker.raise_exc(q, 'int, float, list', 'q')
def hpat_pandas_series_quantile_impl(self, q=0.5, interpolation='linear'):
return numpy.quantile(self._data, q)
return hpat_pandas_series_quantile_impl
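# Illustrative sketch (not part of the SDC implementation): the overload simply
# delegates to numpy.quantile, so a scalar q yields a scalar and an array-like q
# yields an array of quantiles.
def _example_series_quantile_sketch():
    import numpy as np

    data = np.array([1.0, 2.0, 3.0, 4.0])
    return np.quantile(data, 0.5), np.quantile(data, [0.25, 0.75])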
@sdc_overload_method(SeriesType, 'rename')
def hpat_pandas_series_rename(self, index=None, copy=True, inplace=False, level=None):
"""
Pandas Series method :meth:`pandas.Series.rename` implementation.
Alter Series index labels or name.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_rename
Parameters
-----------
index : :obj:`scalar` or `hashable sequence` or `dict` or `function`
Dict-like or functions are transformations to apply to the index.
Scalar or hashable sequence-like will alter the Series.name attribute.
Only scalar value is supported.
copy : :obj:`bool`, default :obj:`True`
Whether to copy underlying data.
inplace : :obj:`bool`, default :obj:`False`
Whether to return a new Series. If True then value of copy is ignored.
level : :obj:`int` or `str`
In case of a MultiIndex, only rename labels in the specified level.
*Not supported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` with index labels or name altered.
"""
ty_checker = TypeChecker('Method rename().')
ty_checker.check(self, SeriesType)
if not isinstance(index, (types.Omitted, types.UnicodeType,
types.StringLiteral, str,
types.Integer, types.Boolean,
types.Hashable, types.Float,
types.NPDatetime, types.NPTimedelta,
types.Number)) and index is not None:
ty_checker.raise_exc(index, 'string', 'index')
if not isinstance(copy, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(copy, 'boolean', 'copy')
if not isinstance(inplace, (types.Omitted, types.Boolean, bool)):
ty_checker.raise_exc(inplace, 'boolean', 'inplace')
if not isinstance(level, (types.Omitted, types.UnicodeType,
types.StringLiteral, types.Integer)) and level is not None:
ty_checker.raise_exc(level, 'Integer or string', 'level')
def hpat_pandas_series_rename_idx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
series_index = self._index.copy()
else:
series_data = self._data
series_index = self._index
return pandas.Series(data=series_data, index=series_index, name=index)
def hpat_pandas_series_rename_noidx_impl(self, index=None, copy=True, inplace=False, level=None):
if copy is True:
series_data = self._data.copy()
else:
series_data = self._data
return pandas.Series(data=series_data, index=self._index, name=index)
if isinstance(self.index, types.NoneType):
return hpat_pandas_series_rename_noidx_impl
return hpat_pandas_series_rename_idx_impl
@sdc_overload_method(SeriesType, 'min')
def hpat_pandas_series_min(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.min` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_min*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method min().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not True \
and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_min_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmin(self._data)
return self._data.min()
return hpat_pandas_series_min_impl
@sdc_overload_method(SeriesType, 'max')
def hpat_pandas_series_max(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.max
Examples
--------
.. literalinclude:: ../../../examples/series/series_max.py
:language: python
:lines: 27-
:caption: Getting the maximum value of Series elements
:name: ex_series_max
.. command-output:: python ./series/series_max.py
:cwd: ../../../examples
.. note::
Parameters axis, level, numeric_only are currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.sum <pandas.Series.sum>`
Return the sum.
:ref:`Series.min <pandas.Series.min>`
Return the minimum.
:ref:`Series.max <pandas.Series.max>`
Return the maximum.
:ref:`Series.idxmin <pandas.Series.idxmin>`
Return the index of the minimum.
:ref:`Series.idxmax <pandas.Series.idxmax>`
Return the index of the maximum.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.max` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_max*
Parameters
-----------
axis:
*unsupported*
skipna: :obj:`bool` object
Exclude nan values when computing the result
level:
*unsupported*
numeric_only:
*unsupported*
Returns
-------
:obj:
returns :obj: scalar
"""
_func_name = 'Method max().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.data.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) or skipna is True or skipna is None):
ty_checker.raise_exc(skipna, 'bool', 'skipna')
if not isinstance(axis, types.Omitted) and axis is not None:
ty_checker.raise_exc(axis, 'None', 'axis')
if not isinstance(level, (types.Omitted, types.NoneType)) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if not isinstance(numeric_only, types.Omitted) and numeric_only is not None:
ty_checker.raise_exc(numeric_only, 'None', 'numeric_only')
def hpat_pandas_series_max_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmax(self._data)
return self._data.max()
return hpat_pandas_series_max_impl
@sdc_overload_method(SeriesType, 'mean')
def hpat_pandas_series_mean(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.mean` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_mean*
Parameters
-----------
axis: {index (0)}
Axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default True
Exclude NA/null values when computing the result.
level: :obj:`int` or level name, default None
If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a scalar.
*unsupported*
numeric_only: :obj:`bool`, default None
Include only float, int, boolean columns.
If None, will attempt to use everything, then use only numeric data. Not implemented for Series.
*unsupported*
Returns
-------
:obj:
Return the mean of the values for the requested axis.
"""
_func_name = 'Method mean().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError(
'{} Currently function supports only numeric values. Given data type: {}'.format(
_func_name, self.data.dtype))
if not isinstance(skipna, (types.Omitted, types.Boolean, types.NoneType)) and skipna is not None:
raise TypingError(
'{} The parameter must be a boolean type. Given type skipna: {}'.format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None) \
or not (isinstance(level, (types.Omitted, types.NoneType)) or level is None) \
or not (isinstance(numeric_only, types.Omitted) or numeric_only is None):
raise TypingError(
'{} Unsupported parameters. Given axis: {}, level: {}, numeric_only: {}'.format(_func_name, axis, level,
numeric_only))
def hpat_pandas_series_mean_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmean(self._data)
return self._data.mean()
return hpat_pandas_series_mean_impl
@sdc_overload_method(SeriesType, 'mod')
def hpat_pandas_series_mod(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.mod` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method mod().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5
"""
return pandas.Series(self._data % other._data)
return hpat_pandas_series_mod_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_mod_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op5_float_scalar
"""
return pandas.Series(self._data % other)
return hpat_pandas_series_mod_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'eq')
def hpat_pandas_series_eq(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.eq` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method eq().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data == other._data)
return hpat_pandas_series_eq_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_eq_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data == other)
return hpat_pandas_series_eq_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'ge')
def hpat_pandas_series_ge(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.ge` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method ge().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data >= other._data)
return hpat_pandas_series_ge_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_ge_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data >= other)
return hpat_pandas_series_ge_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'idxmin')
def hpat_pandas_series_idxmin(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.idxmin` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_str_idx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_no
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_int
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_noidx
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_idxmin_idx
Parameters
-----------
axis : :obj:`int`, :obj:`str`, default: None
Axis along which the operation acts
0/None - row-wise operation
1 - column-wise operation
*unsupported*
skipna: :obj:`bool`, default: True
exclude NA/null values
*unsupported*
Returns
-------
:obj:`pandas.Series.index` or nan
returns: Label of the minimum value.
"""
_func_name = 'Method idxmin().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Numeric values supported only. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(skipna, (types.Omitted, types.Boolean, bool)) or skipna is True):
raise TypingError("{} 'skipna' must be a boolean type. Given: {}".format(_func_name, skipna))
if not (isinstance(axis, types.Omitted) or axis is None):
raise TypingError("{} 'axis' unsupported. Given: {}".format(_func_name, axis))
if not (isinstance(skipna, types.Omitted) or skipna is True):
raise TypingError("{} 'skipna' unsupported. Given: {}".format(_func_name, skipna))
if isinstance(self.index, types.NoneType) or self.index is None:
def hpat_pandas_series_idxmin_impl(self, axis=None, skipna=True):
return numpy.argmin(self._data)
return hpat_pandas_series_idxmin_impl
else:
def hpat_pandas_series_idxmin_index_impl(self, axis=None, skipna=True):
# no numpy.nanargmin is supported by Numba at this time
result = numpy.argmin(self._data)
return self._index[int(result)]
return hpat_pandas_series_idxmin_index_impl
@sdc_overload_method(SeriesType, 'lt')
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'gt')
def hpat_pandas_series_gt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.gt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method gt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data > other._data)
return hpat_pandas_series_gt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_gt_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data > other)
return hpat_pandas_series_gt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'le')
def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.le` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method le().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not (isinstance(level, types.Omitted) or level is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(fill_value, types.Omitted) or fill_value is None):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if not (isinstance(axis, types.Omitted) or axis == 0):
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data <= other._data)
return hpat_pandas_series_le_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_le_impl(self, other, level=None, fill_value=None, axis=0):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data <= other)
return hpat_pandas_series_le_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other))
@sdc_overload_method(SeriesType, 'abs')
def hpat_pandas_series_abs(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.abs
Examples
--------
.. literalinclude:: ../../../examples/series/series_abs.py
:language: python
:lines: 27-
:caption: Getting the absolute value of each element in Series
:name: ex_series_abs
.. command-output:: python ./series/series_abs.py
:cwd: ../../../examples
.. seealso::
`numpy.absolute <https://docs.scipy.org/doc/numpy/reference/generated/numpy.absolute.html>`_
Calculate the absolute value element-wise.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.abs` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_abs1
Parameters
-----------
self: :obj:`pandas.Series`
input series
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` containing the absolute value of elements
"""
_func_name = 'Method abs().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(self.dtype, (types.Integer, types.Float)):
raise TypingError(
'{} The function only applies to elements that are all numeric. Given data type: {}'.format(_func_name,
self.dtype))
def hpat_pandas_series_abs_impl(self):
return pandas.Series(numpy.abs(self._data))
return hpat_pandas_series_abs_impl
@sdc_overload_method(SeriesType, 'unique')
def hpat_pandas_series_unique(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.unique
Examples
--------
.. literalinclude:: ../../../examples/series/series_unique.py
:language: python
:lines: 27-
:caption: Getting unique values in Series
:name: ex_series_unique
.. command-output:: python ./series/series_unique.py
:cwd: ../../../examples
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.unique` implementation.
Note: The order of the returned values is unspecified
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_sorted
Parameters
-----------
self: :class:`pandas.Series`
input arg
Returns
-------
:obj:`numpy.array`
returns :obj:`numpy.array` ndarray
"""
ty_checker = TypeChecker('Method unique().')
ty_checker.check(self, SeriesType)
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_unique_str_impl(self):
'''
Returns sorted unique elements of an array
Note: Can't use NumPy because StringArrayType has no ravel() in nopython mode.
Also, NotImplementedError: unicode_type cannot be represented as a Numpy dtype
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique_str
'''
str_set = set(self._data)
return to_array(str_set)
return hpat_pandas_series_unique_str_impl
def hpat_pandas_series_unique_impl(self):
'''
Returns sorted unique elements of an array
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_unique
'''
return numpy.unique(self._data)
return hpat_pandas_series_unique_impl
@sdc_overload_method(SeriesType, 'cumsum')
def hpat_pandas_series_cumsum(self, axis=None, skipna=True):
"""
Pandas Series method :meth:`pandas.Series.cumsum` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unboxing
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_full
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_str
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_cumsum_unsupported_axis
Parameters
----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int`, :obj:`str`
Axis along which the operation acts
0/None/'index' - row-wise operation
1/'columns' - column-wise operation
*unsupported*
skipna: :obj:`bool`
exclude NA/null values
*args:
*unsupported*
Returns
-------
:obj:`scalar`, :obj:`pandas.Series`
returns :obj:`scalar` or :obj:`pandas.Series` object
"""
_func_name = 'Method cumsum().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
msg = '{} The object must be a number. Given self.data.dtype: {}'
raise TypingError(msg.format(_func_name, self.data.dtype))
if not isinstance(axis, (types.Omitted, types.NoneType)) and axis is not None:
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
def hpat_pandas_series_cumsum_impl(self, axis=None, skipna=True):
if skipna:
# numpy.nancumsum replaces NaNs with 0, but series.cumsum does not, so set those positions back to NaN
local_data = numpy.nancumsum(self._data)
local_data[numpy.isnan(self._data)] = numpy.nan
return pandas.Series(local_data)
return pandas.Series(self._data.cumsum())
return hpat_pandas_series_cumsum_impl
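# Added illustration (plain NumPy only) of the NaN handling performed in
# hpat_pandas_series_cumsum_impl above: numpy.nancumsum treats NaN as 0, so the
# NaN positions are restored afterwards to match pandas.Series.cumsum.
def _example_cumsum_nan_handling():
    import numpy
    data = numpy.array([1.0, numpy.nan, 2.0])
    acc = numpy.nancumsum(data)          # [1.0, 1.0, 3.0]
    acc[numpy.isnan(data)] = numpy.nan   # [1.0, nan, 3.0], like cumsum(skipna=True)
    return acc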
@sdc_overload_method(SeriesType, 'nunique')
def hpat_pandas_series_nunique(self, dropna=True):
"""
Pandas Series method :meth:`pandas.Series.nunique` implementation.
Note: Unsupported mixed numeric and string data
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_nunique
Parameters
-----------
self: :obj:`pandas.Series`
input series
dropna: :obj:`bool`, default True
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method nunique().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_nunique_str_impl(self, dropna=True):
"""
It is better to merge with Numeric branch
"""
data = self._data
if dropna:
nan_mask = self.isna()
data = self._data[~nan_mask._data]
unique_values = set(data)
return len(unique_values)
return hpat_pandas_series_nunique_str_impl
def hpat_pandas_series_nunique_impl(self, dropna=True):
"""
This function is for numeric data because NumPy doesn't support StringArrayType
The algorithm looks a bit ambiguous because, currently, set() cannot be used with NumPy under Numba JIT
"""
data_mask_for_nan = numpy.isnan(self._data)
nan_exists = numpy.any(data_mask_for_nan)
data_no_nan = self._data[~data_mask_for_nan]
data_set = set(data_no_nan)
if dropna or not nan_exists:
return len(data_set)
else:
return len(data_set) + 1
return hpat_pandas_series_nunique_impl
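# Added sketch (plain NumPy) of the dropna bookkeeping in
# hpat_pandas_series_nunique_impl above: all NaNs collapse into one extra
# "unique" value when dropna=False.
def _example_nunique_dropna():
    import numpy
    data = numpy.array([1.0, 1.0, numpy.nan, 2.0, numpy.nan])
    distinct_non_nan = len(set(data[~numpy.isnan(data)]))   # 2
    return distinct_non_nan, distinct_non_nan + 1            # (dropna=True, dropna=False)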
@sdc_overload_method(SeriesType, 'count')
def hpat_pandas_series_count(self, level=None):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.Series.count
Examples
--------
.. literalinclude:: ../../../examples/series/series_count.py
:language: python
:lines: 27-
:caption: Counting non-NaN values in Series
:name: ex_series_count
.. command-output:: python ./series/series_count.py
:cwd: ../../../examples
.. note::
Parameter level is currently unsupported by Intel Scalable Dataframe Compiler
.. seealso::
:ref:`Series.value_counts <pandas.Series.value_counts>`
:ref:`Series.str.len <pandas.Series.str.len>`
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.count` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_count
Parameters
-----------
self: :obj:`pandas.Series`
input series
level: :obj:`int` or name
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method count().'
ty_checker = TypeChecker(_func_name)
ty_checker.check(self, SeriesType)
if not isinstance(level, (types.Omitted, types.NoneType)) and level is not None:
ty_checker.raise_exc(level, 'None', 'level')
if isinstance(self.data, StringArrayType):
def hpat_pandas_series_count_str_impl(self, level=None):
nan_mask = self.isna()
return numpy.sum(nan_mask._data == 0)
return hpat_pandas_series_count_str_impl
def hpat_pandas_series_count_impl(self, level=None):
"""
Return number of non-NA/null observations in the object
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_count
"""
data_no_nan = self._data[~numpy.isnan(self._data)]
return len(data_no_nan)
return hpat_pandas_series_count_impl
@sdc_overload_method(SeriesType, 'median')
def hpat_pandas_series_median(self, axis=None, skipna=None, level=None, numeric_only=None):
"""
Pandas Series method :meth:`pandas.Series.median` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_median1*
Parameters
-----------
self: :obj:`pandas.Series`
input series
axis: :obj:`int` or :obj:`string` {0 or `index`, None}, default None
The axis for the function to be applied on.
*unsupported*
skipna: :obj:`bool`, default True
exclude NA/null values when computing the result
level: :obj:`int` or :obj:`string`, default None
*unsupported*
numeric_only: :obj:`bool` or None, default None
*unsupported*
Returns
-------
:obj:`float` or :obj:`pandas.Series` (if level is specified)
median of values in the series
"""
_func_name = 'Method median().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if not isinstance(self.dtype, types.Number):
raise TypingError(
'{} The function only applies to elements that are all numeric. Given data type: {}'.format(
_func_name, self.dtype))
if not (isinstance(axis, (types.Integer, types.UnicodeType, types.Omitted)) or axis is None):
raise TypingError(
'{} The axis must be an Integer or a String. Currently unsupported. Given: {}'.format(
_func_name, axis))
if not (isinstance(skipna, (types.Boolean, types.Omitted, types.NoneType)) or skipna or skipna is None):
raise TypingError('{} The skipna must be a boolean. Given: {}'.format(_func_name, skipna))
if not ((level is None or isinstance(level, (types.Omitted, types.NoneType)))
and (numeric_only is None or isinstance(numeric_only, types.Omitted))
and (axis is None or isinstance(axis, types.Omitted))
):
raise TypingError(
'{} Unsupported parameters. Given level: {}, numeric_only: {}, axis: {}'.format(
_func_name, level, numeric_only, axis))
def hpat_pandas_series_median_impl(self, axis=None, skipna=None, level=None, numeric_only=None):
if skipna is None:
_skipna = True
else:
_skipna = skipna
if _skipna:
return numpy.nanmedian(self._data)
return numpy.median(self._data)
return hpat_pandas_series_median_impl
@sdc_overload_method(SeriesType, 'argsort')
def hpat_pandas_series_argsort(self, axis=0, kind='quicksort', order=None):
"""
Pandas Series method :meth:`pandas.Series.argsort` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_series.TestSeries.test_series_argsort*
Parameters
-----------
self: :class:`pandas.Series`
input series
axis: :obj:`int`
Has no effect but is accepted for compatibility with numpy.
*unsupported*
kind: :obj:'str', {'mergesort', 'quicksort', 'heapsort'}, default: 'quicksort'
Choice of sorting algorithm. See np.sort for more information. 'mergesort' is the only stable algorithm
*uses python func - sorted() for str and numpy func - sort() for num*
*'heapsort' unsupported*
order: :obj:`str` or :obj:`list of str`, default: None
Has no effect but is accepted for compatibility with numpy.
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns: Positions of values within the sort order with -1 indicating nan values.
"""
_func_name = 'Method argsort().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if not isinstance(self.data.dtype, types.Number):
raise TypingError('{} Non-numeric type unsupported. Given: {}'.format(_func_name, self.data.dtype))
if not (isinstance(axis, types.Omitted) or isinstance(axis, types.Integer) or axis == 0):
raise TypingError('{} Unsupported parameters. Given axis: {}'.format(_func_name, axis))
if not isinstance(kind, (types.Omitted, str, types.UnicodeType, types.StringLiteral)):
raise TypingError('{} Non-string type unsupported. Given kind: {}'.format(_func_name, kind))
if not isinstance(order, (str, types.UnicodeType, types.StringLiteral, types.Omitted, types.NoneType, types.List))\
and order is not None:
raise TypingError('{} Unsupported parameters. Given order: {}'.format(_func_name, order))
if not isinstance(self.index, types.NoneType):
def hpat_pandas_series_argsort_idx_impl(self, axis=0, kind='quicksort', order=None):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method argsort(). Unsupported parameter. Given 'kind' != 'quicksort' or 'mergesort'")
if kind == 'mergesort':
# It is impossible to use numpy.argsort(self._data, kind=kind) since Numba gives a typing error
sort = numpy.argsort(self._data, kind='mergesort')
else:
sort = numpy.argsort(self._data)
na = self.isna().sum()
result = numpy.empty(len(self._data), dtype=numpy.int64)
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
if kind == 'mergesort':
sort_nona = numpy.argsort(self._data[~na_data_arr], kind='mergesort')
else:
sort_nona = numpy.argsort(self._data[~na_data_arr])
q = 0
for id, i in enumerate(sort):
if id in set(sort[len(self._data) - na:]):
q += 1
else:
result[id] = sort_nona[id - q]
for i in sort[len(self._data) - na:]:
result[i] = -1
return pandas.Series(result, self._index)
return hpat_pandas_series_argsort_idx_impl
def hpat_pandas_series_argsort_noidx_impl(self, axis=0, kind='quicksort', order=None):
if kind != 'quicksort' and kind != 'mergesort':
raise ValueError("Method argsort(). Unsupported parameter. Given 'kind' != 'quicksort' or 'mergesort'")
if kind == 'mergesort':
sort = numpy.argsort(self._data, kind='mergesort')
else:
sort = numpy.argsort(self._data)
na = self.isna().sum()
result = numpy.empty(len(self._data), dtype=numpy.int64)
na_data_arr = sdc.hiframes.api.get_nan_mask(self._data)
if kind == 'mergesort':
sort_nona = numpy.argsort(self._data[~na_data_arr], kind='mergesort')
else:
sort_nona = numpy.argsort(self._data[~na_data_arr])
q = 0
for id, i in enumerate(sort):
if id in set(sort[len(self._data) - na:]):
q += 1
else:
result[id] = sort_nona[id - q]
for i in sort[len(self._data) - na:]:
result[i] = -1
return pandas.Series(result)
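# Added worked example of the NaN convention implemented above (an illustration,
# not part of the original module): for [3.0, nan, 1.0] the non-NaN part behaves
# like numpy.argsort([3.0, 1.0]) -> [1, 0] written into the non-NaN slots, while
# the NaN slot is marked with -1, giving pandas.Series([1, -1, 0]).
def _example_argsort_nan_convention():
    import numpy
    data = numpy.array([3.0, numpy.nan, 1.0])
    return numpy.argsort(data[~numpy.isnan(data)])  # [1, 0]; the NaN slot becomes -1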
import cv2
from datetime import datetime
import pandas
from bokeh.plotting import figure
from bokeh.io import output_file, show
first_frame = None
status_list = [None, None]
time_stamp = []
video = cv2.VideoCapture(0)
while True:
check, frame = video.read()
status = 0
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gaussian_frame = cv2.GaussianBlur(gray_frame, (21,21), 0)
if first_frame is None:
first_frame = gaussian_frame
continue
delta_frame = cv2.absdiff(first_frame, gaussian_frame)
threshold_delta_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
threshold_delta_frame_smooth = cv2.dilate(threshold_delta_frame, None, iterations=2)
#contour
(cnts,_) = cv2.findContours(threshold_delta_frame_smooth.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for cont in cnts:
if cv2.contourArea(cont) < 10000:
continue
status = 1
(x, y, w, h) = cv2.boundingRect(cont)
cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 3)
cv2.imshow("threshold_delta_frame_smooth", threshold_delta_frame_smooth)
cv2.imshow("main_frame", frame)
key = cv2.waitKey(1)
status_list.append(status)
if status_list[-1]==1 and status_list[-2]==0:
time_stamp.append(datetime.now())
if status_list[-1]==0 and status_list[-2]==1:
time_stamp.append(datetime.now())
if key == ord('q'):
if status == 1:
time_stamp.append(datetime.now())
break
print(time_stamp)
df = pandas.DataFrame(columns=['start', 'end'])
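# --- Added sketch (not part of the original script): one plausible way to use
# --- the 'start'/'end' columns and the bokeh imports declared above. Pairing
# --- the timestamps and the 'motion_times.html' output name are assumptions.
for start, end in zip(time_stamp[0::2], time_stamp[1::2]):
    df.loc[len(df)] = [start, end]
video.release()
cv2.destroyAllWindows()
output_file('motion_times.html')
plot = figure(x_axis_type='datetime', title='Motion periods')
plot.quad(left=df['start'], right=df['end'], top=1, bottom=0, color='green')
show(plot)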
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import re
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import pandas as pd
import pytest
import cudf
from cudf.datasets import randomdata
from cudf.testing._utils import assert_eq, assert_exceptions_equal
params_dtypes = [np.int32, np.uint32, np.float32, np.float64]
methods = ["min", "max", "sum", "mean", "var", "std"]
interpolation_methods = ["linear", "lower", "higher", "midpoint", "nearest"]
@pytest.mark.parametrize("method", methods)
@pytest.mark.parametrize("dtype", params_dtypes)
@pytest.mark.parametrize("skipna", [True, False])
def test_series_reductions(method, dtype, skipna):
np.random.seed(0)
arr = np.random.random(100)
if np.issubdtype(dtype, np.integer):
arr *= 100
mask = arr > 10
else:
mask = arr > 0.5
arr = arr.astype(dtype)
if dtype in (np.float32, np.float64):
arr[[2, 5, 14, 19, 50, 70]] = np.nan
sr = cudf.Series.from_masked_array(arr, cudf.Series(mask).as_mask())
psr = sr.to_pandas()
psr[~mask] = np.nan
def call_test(sr, skipna):
fn = getattr(sr, method)
if method in ["std", "var"]:
return fn(ddof=1, skipna=skipna)
else:
return fn(skipna=skipna)
expect, got = call_test(psr, skipna=skipna), call_test(sr, skipna=skipna)
np.testing.assert_approx_equal(expect, got)
@pytest.mark.parametrize("method", methods)
def test_series_reductions_concurrency(method):
e = ThreadPoolExecutor(10)
np.random.seed(0)
srs = [cudf.Series(np.random.random(10000)) for _ in range(1)]
def call_test(sr):
fn = getattr(sr, method)
if method in ["std", "var"]:
return fn(ddof=1)
else:
return fn()
def f(sr):
return call_test(sr + 1)
list(e.map(f, srs * 50))
@pytest.mark.parametrize("ddof", range(3))
def test_series_std(ddof):
np.random.seed(0)
arr = np.random.random(100) - 0.5
sr = cudf.Series(arr)
pd = sr.to_pandas()
got = sr.std(ddof=ddof)
expect = pd.std(ddof=ddof)
np.testing.assert_approx_equal(expect, got)
def test_series_unique():
for size in [10 ** x for x in range(5)]:
arr = np.random.randint(low=-1, high=10, size=size)
mask = arr != -1
sr = cudf.Series.from_masked_array(arr, cudf.Series(mask).as_mask())
assert set(arr[mask]) == set(sr.unique().to_array())
assert len(set(arr[mask])) == sr.nunique()
@pytest.mark.parametrize(
"nan_as_null, dropna",
[(True, True), (True, False), (False, True), (False, False)],
)
def test_series_nunique(nan_as_null, dropna):
# We remove nulls as opposed to NaNs using the dropna parameter,
# so to test against pandas we replace NaN with another discrete value
cudf_series = cudf.Series([1, 2, 2, 3, 3], nan_as_null=nan_as_null)
pd_series = pd.Series([1, 2, 2, 3, 3])
expect = pd_series.nunique(dropna=dropna)
got = cudf_series.nunique(dropna=dropna)
assert expect == got
cudf_series = cudf.Series(
[1.0, 2.0, 3.0, np.nan, None], nan_as_null=nan_as_null
)
if nan_as_null is True:
pd_series = pd.Series([1.0, 2.0, 3.0, np.nan, None])
import os
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import netdice.experiments.sri_plot_helper as sph
from netdice.experiments.compare_approaches import bf_states_for_target_precision, \
hoeffding_samples_for_target_precision
from netdice.my_logging import log
class Analyzer:
def __init__(self, data: list, output_dir: str):
self.output_dir = output_dir
self.data = data
self.nof_links_for_topology = {} # assigns to each topology name the number of links
self.nof_nodes_for_topology = {} # assigns to each topology name the number of nodes
self.flierprops = dict(marker='.') # configuration for outlier markers in boxplots
sph.configure_plots_nolatex(font_size=12)
def analyze(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
log.info("Collecting topology statistics...")
self._find_nof_links()
log.info("Generating plot 1a...")
self._plot_1a()
log.info("Generating plot 1bc...")
self._plot_1bc()
log.info("Generating plot intro...")
self._plot_intro()
log.info("Generating plot 2...")
self._plot_2()
log.info("Generating plot 3...")
self._plot_3()
log.info("Generating plot 5...")
self._plot_5()
def _get_topo_name(self, elem):
return "-".join(elem["ctx"][0].split("-")[:-1])
def _print_links_nodes(self, topo_name):
log.info("Topology {}: {} nodes, {} links".format(topo_name, self.nof_nodes_for_topology[topo_name], self.nof_links_for_topology[topo_name]))
def _find_nof_links(self):
min_links = 100000000
max_links = -100000000
min_nodes = 100000000
max_nodes = -100000000
for elem in self.data:
if "topology" in elem:
n_links = elem["topology"]["nof_links"]
n_nodes = elem["topology"]["nof_nodes"]
self.nof_links_for_topology[elem["topology"]["name"]] = n_links
self.nof_nodes_for_topology[elem["topology"]["name"]] = n_nodes
if elem["topology"]["name"] in ["isp"]:
break
if n_links < min_links:
min_links = n_links
if n_links > max_links:
max_links = n_links
if n_nodes < min_nodes:
min_nodes = n_nodes
if n_nodes > max_nodes:
max_nodes = n_nodes
log.info("Links: MIN {} MAX {}".format(min_links, max_links))
log.info("Nodes: MIN {} MAX {}".format(min_nodes, max_nodes))
self._print_links_nodes("Colt")
self._print_links_nodes("Uninett2010")
self._print_links_nodes("Kdl")
self._print_links_nodes("AS-3549")
def _multi_trace_plot(self, df, nof_states):
colors = ["#0453E5", "#1B62E5", "#3271E5", "#4980E5", "#608EE5", "#779DE5", "#8EACE5", "#A5BBE5", "#BCCAE5",
"#D3D9E5"]
for i in range(0, 10):
plt.plot(range(0, nof_states[i]), "precision", data=df[df["rep"] == i], c=colors[i])
plt.xlabel("explored states")
plt.ylabel("imprecision")
plt.loglog()
def _plot_1a(self):
data_list = []
prec_list = []
is_timeout = set()
max_time_explore = {}
for elem in self.data:
if "timeout_after_seconds" in elem and elem["ctx"][0].endswith("-default"):
is_timeout.add((self._get_topo_name(elem), elem["ctx"][1]))
elif "time-explore" in elem and elem["ctx"][0].endswith("-default"):
nof_links = self.nof_links_for_topology[self._get_topo_name(elem)]
if self._get_topo_name(elem) not in max_time_explore or max_time_explore[self._get_topo_name(elem)] < elem["time-explore"]:
max_time_explore[self._get_topo_name(elem)] = elem["time-explore"]
if nof_links <= 75:
range = "50--75"
elif nof_links <= 100:
range = "76--100"
elif nof_links <= 200:
range = "101--200"
else:
range = "$>$ 200"
timeout = (self._get_topo_name(elem), elem["ctx"][1]) in is_timeout
data_list.append((elem["ctx"][0], nof_links, range, elem["ctx"][1], elem["time-explore"], timeout))
elif "finished" in elem and elem["ctx"][0].endswith("-default"):
prec_list.append((elem["ctx"][0], elem["finished"]["precision"]))
df = pd.DataFrame(data_list, columns=["experiment", "links", "range", "rep", "time", "is_timeout"])
df_prec = pd.DataFrame(prec_list, columns=["experiment", "precision"])
# count number of timeouts per topology
df_to = df[["experiment", "is_timeout"]].groupby("experiment").sum()
log.info("Number of timeouts:\n%s", str(df_to[df_to.is_timeout > 0]))
# compute the worst-case imprecision
log.info("Worst imprecision:\n%s", str(df_prec[df_prec.precision > 1E-4].groupby("experiment").max()))
sph.new_figure(11, 5.5)
plt.axhline(60*60, c="gray", lw=1, label="1 h (timeout)")
df_max = df[["experiment", "time", "links"]].groupby("experiment").max()
plt.plot("links", "time", "x", data=df_max, markersize=4, mew=0.6, label="maximum")
df_med = df[["experiment", "time", "links"]].groupby("experiment").median()
plt.plot("links", "time", "+", data=df_med, markersize=4, mew=0.6, label="median")
plt.xlabel("links")
plt.ylabel("time [s]")
plt.legend(handletextpad=0.3)
plt.loglog()
sph.savefig(os.path.join(self.output_dir, "plot_1a.pdf"))
def _plot_1bc(self):
data_list = []
nof_states = {}
for elem in self.data:
if "precision" in elem and elem["ctx"][0] == "Colt-trace":
data_list.append((elem["ctx"][1], elem["precision"]))
elif "finished" in elem and elem["ctx"][0] == "Colt-trace":
nof_states[elem["ctx"][1]] = elem["finished"]["num_explored"]
df = pd.DataFrame(data_list, columns=["rep", "precision"])
sph.new_figure(9, 6)
self._multi_trace_plot(df, nof_states)
sph.savefig(os.path.join(self.output_dir, "plot_1bi.pdf"))
time_data_list = []
for elem in self.data:
if "time-explore" in elem and elem["ctx"][0] == "isp-trace":
time_data_list.append(("ISP", elem["time-explore"]))
if len(time_data_list) == 0:
log.warning("skipping plot_1cii as ISP data is missing")
return
df = pd.DataFrame(time_data_list, columns=["net", "time"])
max_y = 120
log.info("Outliers:\n%s", str(df[df.time > max_y]))
nof_greater_max_y = df[df.time > max_y].count()["net"]
fig, ax = sph.new_figure(2, 6)
df.boxplot(column="time", by="net", ax=ax, grid=False, flierprops=self.flierprops)
ax.grid(b=True, which='major', axis='y', color='w')
ax.set_axisbelow(True)
plt.ylim([0, max_y])
plt.xlabel("")
plt.ylabel("time [s]")
plt.title("")
fig.suptitle("")
plt.gcf().text(0.4, 0.93, "+ {}".format(nof_greater_max_y), fontsize=12)
sph.savefig(os.path.join(self.output_dir, "plot_1cii.pdf"))
def _plot_intro(self):
data_list = []
nof_states = 0
for elem in self.data:
if "precision" in elem and elem["ctx"][0] == "Colt-intro":
data_list.append(elem["precision"])
elif "finished" in elem and elem["ctx"][0] == "Colt-intro":
nof_states = elem["finished"]["num_explored"]
df_ours = pd.DataFrame(data_list, columns=["precision"])
# import app components
from app import app, data
from flask_cors import CORS
CORS(app) # enable CORS for all routes
# import libraries
from flask import request
import pandas as pd
import re
from datetime import datetime
from functools import reduce
# define functions
## process date args
def date_arg(arg):
try:
arg = datetime.strptime(arg, '%d-%m-%Y')
except:
try:
arg = datetime.strptime(arg, '%Y-%m-%d')
except:
arg = None
return arg
## process missing arg
def missing_arg(missing):
if missing == 'na':
missing_val = 'NA'
elif missing == 'empty':
missing_val = ''
elif missing == 'nan':
missing_val = 'NaN'
else:
missing_val = 'NULL'
return(missing_val)
## get date column
def get_date_col(df):
return list(filter(re.compile('^date_.*').search, df.columns.values))[0]
# list of dataset by location
data_canada = ['cases_timeseries_canada',
'mortality_timeseries_canada',
'recovered_timeseries_canada',
'testing_timeseries_canada',
'active_timeseries_canada',
'vaccine_administration_timeseries_canada',
'vaccine_distribution_timeseries_canada',
'vaccine_completion_timeseries_canada']
data_prov = ['cases_timeseries_prov',
'mortality_timeseries_prov',
'recovered_timeseries_prov',
'testing_timeseries_prov',
'active_timeseries_prov',
'vaccine_administration_timeseries_prov',
'vaccine_distribution_timeseries_prov',
'vaccine_completion_timeseries_prov']
data_hr = ['cases_timeseries_hr',
'mortality_timeseries_hr']
data_names = ['cases',
'mortality',
'recovered',
'testing',
'active',
'avaccine',
'dvaccine',
'cvaccine']
data_sknew = ['sk_new_cases_timeseries_hr_combined',
'sk_new_mortality_timeseries_hr_combined']
data_names_dates = {
'date_report': 'cases',
'date_death_report': 'mortality',
'date_recovered': 'recovered',
'date_testing': 'testing',
'date_active': 'active',
'date_vaccine_administered': 'avaccine',
'date_vaccine_distributed': 'dvaccine',
'date_vaccine_completed': 'cvaccine'
}
data_other = {
'prov': 'prov_map',
'hr': 'hr_map',
'age_cases': 'age_map_cases',
'age_mortality': 'age_map_mortality'
}
@app.route('/')
@app.route('/index')
def index():
# initialize response
response = {}
# subset dataframes
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
# rename date columns
for df in dfs.values():
df.columns = df.columns.str.replace('^date_.*', 'date', regex = True)
# subset active dataframe to avoid duplicate columns
dfs['active_timeseries_canada'] = dfs['active_timeseries_canada'].drop(columns=['cumulative_cases',
'cumulative_recovered',
'cumulative_deaths'])
# merge dataframes
df = reduce(lambda left, right: pd.merge(left, right, on=['date', 'province'], how='outer'), dfs.values())
# convert date column and filter to most recent date
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df = df.loc[df['date'] == data.version['date']]
# format output
df['date'] = df['date'].dt.strftime('%d-%m-%Y')
df = df.fillna('NULL')
response['summary'] = df.to_dict(orient='records')
# add version to response
response['version'] = data.version['version']
# return response
return response
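# Added usage sketch for the summary endpoint above (assumption: the app is
# served locally on the Flask default port 5000; not part of the original API).
def _example_query_summary():
    import requests
    resp = requests.get('http://127.0.0.1:5000/').json()
    return resp['version'], len(resp['summary'])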
@app.route('/timeseries')
def timeseries():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
if not loc:
loc = 'prov'
# get dataframes
if loc == 'canada':
if stat == 'cases':
data_name = data_canada[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_canada[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_canada[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_canada[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_canada[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_canada[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_canada[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_canada[7]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
dfs = list(dfs.values()) # convert to list
elif loc == 'prov' or loc in data.keys_prov.keys():
if stat == 'cases':
data_name = data_prov[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_prov[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'recovered':
data_name = data_prov[2]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'testing':
data_name = data_prov[3]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'active':
data_name = data_prov[4]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'avaccine':
data_name = data_prov[5]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'dvaccine':
data_name = data_prov[6]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'cvaccine':
data_name = data_prov[7]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_prov}
dfs = list(dfs.values()) # convert to list
elif loc == 'hr' or loc in data.keys_hr.keys():
if stat == 'cases':
data_name = data_hr[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_hr[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_canada}
dfs = list(dfs.values()) # convert to list
else:
return "Record not found", 404
# filter by location
if loc in data.keys_prov.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_prov[loc]['province']]
elif loc in data.keys_hr.keys():
for i in range(len(dfs)):
dfs[i] = dfs[i].loc[dfs[i]['health_region'] == data.keys_hr[loc]['health_region']]
if loc != '9999':
dfs[i] = dfs[i].loc[dfs[i]['province'] == data.keys_hr[loc]['province']]
# convert date column
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
dfs[i][col_date] = pd.to_datetime(dfs[i][col_date], dayfirst=True)
# filter by date
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if date:
dfs[i] = dfs[i].loc[dfs[i][col_date] == date]
if after:
dfs[i] = dfs[i].loc[dfs[i][col_date] >= after]
if before:
dfs[i] = dfs[i].loc[dfs[i][col_date] <= before]
# format output
for i in range(len(dfs)):
col_date = get_date_col(dfs[i])
if ymd == 'true':
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%Y-%m-%d')
else:
dfs[i][col_date] = dfs[i][col_date].dt.strftime('%d-%m-%Y')
dfs[i] = dfs[i].fillna(missing_val)
# determine response name and add dataframe to response
resp_name = data_names_dates[col_date]
response[resp_name] = dfs[i].to_dict(orient='records')
# add version to response
if version == 'true':
response['version'] = data.version['version']
# return response
return response
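# Added usage sketch for the /timeseries route above (assumptions: local server
# on port 5000 and 'ON' being a valid province key in data.keys_prov; not part
# of the original API).
def _example_query_timeseries():
    import requests
    params = {'stat': 'cases', 'loc': 'ON', 'after': '2021-01-01', 'ymd': 'true'}
    return requests.get('http://127.0.0.1:5000/timeseries', params=params).json()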
@app.route('/sknew')
def sknew():
# initialize response
response = {}
# read arguments
stat = request.args.get('stat')
loc = request.args.get('loc')
date = request.args.get('date')
after = request.args.get('after')
before = request.args.get('before')
ymd = request.args.get('ymd')
missing = request.args.get('missing')
version = request.args.get('version')
# process date arguments
if date:
date = date_arg(date)
if after:
after = date_arg(after)
if before:
before = date_arg(before)
# process other arguments
missing_val = missing_arg(missing)
# get dataframes
if stat == 'cases':
data_name = data_sknew[0]
dfs = [pd.read_csv(data.ccodwg[data_name])]
elif stat == 'mortality':
data_name = data_sknew[1]
dfs = [pd.read_csv(data.ccodwg[data_name])]
else:
dfs = {k: pd.read_csv(data.ccodwg[k]) for k in data_sknew}
#! /usr/bin/env python3
import re
import math
import json
import inspect
import pkg_resources
import numpy as np
import pandas as pd
from time import time
from joblib import Parallel, delayed
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
from pkg_resources import resource_filename
from pandas.api.types import CategoricalDtype
from .utils import fpath, _mywrap
pkg_resources.require("pandas>=0.21.0")
def convert_med(
pcts: Union[str, List[str]] = ['0001', '01', '05', '100'],
years: Union[int, List[int]] = range(2001, 2013),
data_types: Union[str, List[str]] = ['carc', 'opc', 'bsfab', 'med'],
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: bool = True,
n_jobs: int = 6,
med_dta: str = '/disk/aging/medicare/data',
med_pq:
str = '/disk/agebulk3/medicare.work/doyle-DUA51929/barronk-DUA51929/raw/pq'
) -> None: # yapf: disable
"""Convert Medicare Stata files to parquet
Args:
pcts: percent samples to convert
years: file years to convert
data_types:
types of data files to convert
- ``bsfab`` (`Beneficiary Summary File, Base segment`_)
- ``bsfcc`` (`Beneficiary Summary File, Chronic Conditions segment`_)
- ``bsfcu`` (`Beneficiary Summary File, Cost & Use segment`_)
- ``bsfd`` (`Beneficiary Summary File, National Death Index segment`_)
- ``carc`` (`Carrier File, Claims segment`_)
- ``carl`` (`Carrier File, Line segment`_)
- ``den`` (Denominator File)
- ``dmec`` (`Durable Medical Equipment File, Claims segment`_)
- ``dmel`` (`Durable Medical Equipment File, Line segment`_)
- ``hhac`` (`Home Health Agency File, Claims segment`_)
- ``hhar`` (`Home Health Agency File, Revenue Center segment`_)
- ``hosc`` (`Hospice File, Claims segment`_)
- ``hosr`` (`Hospice File, Revenue Center segment`_)
- ``ipc`` (`Inpatient File, Claims segment`_)
- ``ipr`` (`Inpatient File, Revenue Center segment`_)
- ``med`` (`MedPAR File`_)
- ``opc`` (`Outpatient File, Claims segment`_)
- ``opr`` (`Outpatient File, Revenue Center segment`_)
- ``snfc`` (`Skilled Nursing Facility File, Claims segment`_)
- ``snfr`` (`Skilled Nursing Facility File, Revenue Center segment`_)
- ``xw`` (Crosswalks files for ``ehic`` - ``bene_id``)
.. _`Beneficiary Summary File, Base segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#base-abcd-segment_2
.. _`Beneficiary Summary File, Chronic Conditions segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#chronic-conditions-segment_2
.. _`Beneficiary Summary File, Cost & Use segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#cost-and-use-segment_1
.. _`Beneficiary Summary File, National Death Index segment`: https://kylebarron.github.io/medicare-documentation/resdac/mbsf/#national-death-index-segment_1
.. _`Carrier File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/carrier-rif/#carrier-rif_1
.. _`Carrier File, Line segment`: https://kylebarron.github.io/medicare-documentation/resdac/carrier-rif/#line-file
.. _`Durable Medical Equipment File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/dme-rif/#durable-medical-equipment-rif_1
.. _`Durable Medical Equipment File, Line segment`: https://kylebarron.github.io/medicare-documentation/resdac/dme-rif/#line-file
.. _`Home Health Agency File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/hha-rif/#home-health-agency-rif_1
.. _`Home Health Agency File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/hha-rif/#revenue-center-file
.. _`Hospice File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/hospice-rif/#hospice-rif_1
.. _`Hospice File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/hospice-rif/#revenue-center-file
.. _`Inpatient File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/ip-rif/#inpatient-rif_1
.. _`Inpatient File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/ip-rif/#revenue-center-file
.. _`MedPAR File`: https://kylebarron.github.io/medicare-documentation/resdac/medpar-rif/#medpar-rif_1
.. _`Outpatient File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/op-rif/#outpatient-rif_1
.. _`Outpatient File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/op-rif/#revenue-center-file
.. _`Skilled Nursing Facility File, Claims segment`: https://kylebarron.github.io/medicare-documentation/resdac/snf-rif/#skilled-nursing-facility-rif_1
.. _`Skilled Nursing Facility File, Revenue Center segment`: https://kylebarron.github.io/medicare-documentation/resdac/snf-rif/#revenue-center-file
rg_size: size in GB of each Parquet row group
parquet_engine: either 'fastparquet' or 'pyarrow'
compression_type: 'SNAPPY' or 'GZIP'
manual_schema: whether to create manual parquet schema. Doesn't
always work.
ehic_xw: Merge bene_id onto old files with ehic
n_jobs: number of processes to use
med_dta: top of tree for medicare stata files
med_pq: top of tree to output new parquet files
"""
if type(pcts) is str:
pcts = [pcts]
elif type(pcts) is list:
pass
else:
raise TypeError('pcts must be string or list of strings')
if type(years) is int:
years = [years]
elif type(years) is list:
pass
elif type(years) is range:
pass
else:
raise TypeError('years must be int, range, or list of ints')
if type(data_types) is str:
data_types = [data_types]
elif type(data_types) is list:
pass
else:
raise TypeError('data_types must be string or list of strings')
data_list = [[x, y, z] for x in pcts for y in years for z in data_types]
# Drop 100% carrier:
# data_list = [
# x for x in data_list if not (x[2] == 'carc') & (x[0] == '100')]
# Or:
# Replace 100% carrier with 20% carrier:
data_list = [['20', x[1], x[2]]
if ((x[2] == 'carc') & (x[0] == '100')) else x
for x in data_list]
# Make sure list is unique:
data_list = sorted([list(x) for x in set(tuple(y) for y in data_list)])
Parallel(n_jobs=n_jobs)(
delayed(_convert_med)(
*i,
rg_size=rg_size,
parquet_engine=parquet_engine,
compression_type=compression_type,
manual_schema=manual_schema,
ehic_xw=ehic_xw,
med_dta=med_dta,
med_pq=med_pq) for i in data_list)
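# Hedged usage sketch (hypothetical helper, not called anywhere): illustrates the arguments
# documented above. The percent/year/data_type values are placeholders, and the default
# med_dta/med_pq paths must exist on your system for a real run.
def _example_convert_med():
    convert_med(
        pcts=['01'],
        years=[2007],
        data_types=['bsfab'],
        n_jobs=1)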
def _convert_med(
pct: str,
year: int,
data_type: Union[str, List[str]],
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: bool = True,
med_dta: str = '/disk/aging/medicare/data',
med_pq:
str = '/disk/agebulk3/medicare.work/doyle-DUA51929/barronk-DUA51929/raw/pq'
) -> None: # yapf: disable
"""Convert a single Medicare file to parquet format.
Args:
pct: percent sample to convert
year: year of data to convert
data_type:
type of data files to convert
- ``bsfab`` Beneficiary Summary File, Base segment
- ``bsfcc`` Beneficiary Summary File, Chronic Conditions segment
- ``bsfcu`` Beneficiary Summary File, Cost & Use segment
- ``bsfd`` Beneficiary Summary File, National Death Index segment
- ``carc`` Carrier File, Claims segment
- ``carl`` Carrier File, Line segment
- ``den`` Denominator File
- ``dmec`` Durable Medical Equipment File, Claims segment
- ``dmel`` Durable Medical Equipment File, Line segment
- ``hhac`` Home Health Agency File, Claims segment
- ``hhar`` Home Health Agency File, Revenue Center segment
- ``hosc`` Hospice File, Claims segment
- ``hosr`` Hospice File, Revenue Center segment
- ``ipc`` Inpatient File, Claims segment
- ``ipr`` Inpatient File, Revenue Center segment
- ``med`` MedPAR File
- ``opc`` Outpatient File, Claims segment
- ``opr`` Outpatient File, Revenue Center segment
- ``snfc`` Skilled Nursing Facility File, Claims segment
- ``snfr`` Skilled Nursing Facility File, Revenue Center segment
- ``xw`` Crosswalks files for ``ehic`` - ``bene_id``
rg_size: size in GB of each Parquet row group
parquet_engine: either 'fastparquet' or 'pyarrow'
compression_type: 'SNAPPY' or 'GZIP'
manual_schema: whether to create manual parquet schema. Doesn't
always work.
med_dta: canonical path for raw medicare dta files
med_pq: top of tree to output new parquet files
ehic_xw: Merge bene_id onto old files with ehic
Returns:
nothing. Writes parquet file to disk.
Raises:
NameError if data_type is not one of the above
"""
if type(pct) != str:
raise TypeError('pct must be str')
if type(year) != int:
raise TypeError('year must be int')
infile = fpath(percent=pct, year=year, data_type=data_type, dta=True)
outfile = fpath(
percent=pct, year=year, data_type=data_type, dta=False, pq_path=med_pq)
if not data_type.startswith('bsf'):
# TODO Refactor this into separate function.
path = resource_filename(
'medicare_utils', f'metadata/xw/{data_type}.json')
try:
with open(path) as f:
varnames = json.load(f)
except OSError:
varnames = {}
rename_dict = {}
for varname, names in varnames.items():
n = {k: v for k, v in names.items() if k == str(year)}
if n:
rename_dict[n[str(year)]['name']] = varname
if rename_dict:
# Remove items from dict that map to duplicate values
# Can't save a parquet file where multiple cols have same name
rev_rename_dict = {}
for key, value in rename_dict.items():
rev_rename_dict.setdefault(value, set()).add(key)
dups = [key for key, val in rev_rename_dict.items() if len(val) > 1]
for k, v in rename_dict.copy().items():
if v in dups:
rename_dict.pop(k)
else:
print(f'Year not in variable dictionary: {year}')
rename_dict = None
else:
rename_dict = None
# Make folder path if it doesn't exist
folder = Path(outfile).parents[0]
folder.mkdir(exist_ok=True, parents=True)
msg = f"""\
Starting {data_type} conversion
- Percent: {pct}
- Year {year}
"""
print(_mywrap(msg))
if ehic_xw and (year <= 2005) and not (data_type.startswith('bsf')):
ehic_xw = fpath(pct, year, 'xw_bsf', pq_path=med_pq)
if not Path(ehic_xw).is_file():
ehic_xw = fpath(pct, year, 'xw_bsf', dta=True, dta_path=med_dta)
else:
ehic_xw = None
try:
convert_file(
infile=infile,
outfile=outfile,
rename_dict=rename_dict,
rg_size=rg_size,
parquet_engine=parquet_engine,
compression_type=compression_type,
manual_schema=manual_schema,
ehic_xw=ehic_xw)
except:
pass
def convert_file(
infile: str,
outfile: str,
rename_dict: Dict[str, str] = None,
rg_size: float = 2.5,
parquet_engine: str = 'pyarrow',
compression_type: str = 'SNAPPY',
manual_schema: bool = False,
ehic_xw: Optional[str] = None) -> None:
"""Convert arbitrary Stata file to Parquet format
Args:
infile: path of file to read from
outfile: path of file to export to
rename_dict: keys should be initial variable names; values should
be new variable names
rg_size: Size in GB of the individual row groups
parquet_engine: either ``pyarrow`` or ``fastparquet``
compression_type: Compression algorithm to use. Can be ``SNAPPY`` or
``GZIP``.
manual_schema: Create parquet schema manually. For use with
pyarrow; doesn't always work
ehic_xw: Merge bene_id onto old files with ehic
Returns:
Writes .parquet file to disk.
"""
if parquet_engine == 'pyarrow':
import pyarrow as pa
import pyarrow.parquet as pq
elif parquet_engine == 'fastparquet':
import fastparquet as fp
t0 = time()
infile = Path(infile)
# File name without suffix
infile_stub = infile.stem
# Extension
infile_type = infile.suffix[1:]
# Set row group size. The following makes an even multiple of row groups
# as close as possible to the given `rg_size`
file_size = infile.stat().st_size / (1024 ** 3)
n_rg = round(file_size / rg_size)
if n_rg == 0:
n_rg += 1
nrow_total = pd.read_stata(infile, iterator=True).nobs
nrow_rg = math.ceil(nrow_total / n_rg)
gb_per_rg = file_size / n_rg
msg = f"""\
Row groups:
- {n_rg} of size {gb_per_rg:.2f} GB
Beginning scanning dtypes of file:
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if parquet_engine == 'pyarrow':
dtypes = _scan_file(infile, categorical=False)
elif parquet_engine == 'fastparquet':
dtypes = _scan_file(infile, categorical=True)
if rename_dict is not None:
for old_name, new_name in rename_dict.items():
try:
dtypes[new_name] = dtypes.pop(old_name)
except KeyError:
pass
msg = f"""\
Finished scanning dtypes of file
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if ehic_xw:
ehic_xw = Path(ehic_xw)
if ehic_xw.suffix == '.parquet':
xw = pd.read_parquet(ehic_xw, columns=['ehic', 'bene_id'])
elif ehic_xw.suffix == '.dta':
xw = pd.read_stata(ehic_xw, columns=['ehic', 'bene_id'])
xw = xw.set_index('ehic')
itr = pd.read_stata(infile, chunksize=nrow_rg)
i = 0
for df in itr:
i += 1
msg = f"""\
Read from file:
- Group {i}
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if rename_dict is not None:
df = df.rename(columns=rename_dict)
# Rename columns that aren't in XW with `x_` prefix
non_xw_cols = set(df.columns).difference(rename_dict.values())
df = df.rename(columns={x: 'x_' + x for x in non_xw_cols})
for col in non_xw_cols:
try:
dtypes['x_' + col] = dtypes.pop(col)
except KeyError:
pass
df = df.astype(dtypes)
if ehic_xw:
df = df.merge(xw, how='left', left_on='ehic', right_index=True)
msg = f"""\
Cleaned file:
- Group {i}
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if parquet_engine == 'pyarrow':
if i == 1:
if manual_schema:
schema = _create_parquet_schema(df.dtypes)
else:
schema = pa.Table.from_pandas(
df, preserve_index=False).schema
writer = pq.ParquetWriter(outfile, schema, flavor='spark')
writer.write_table(pa.Table.from_pandas(df, preserve_index=False))
elif parquet_engine == 'fastparquet':
if i == 1:
fp.write(
outfile,
df,
compression=compression_type,
has_nulls=False,
write_index=False,
object_encoding='utf8')
else:
fp.write(
outfile,
df,
compression=compression_type,
has_nulls=False,
write_index=False,
object_encoding='utf8',
append=True)
msg = f"""\
Wrote to parquet:
- Group {i}
- infile: {infile_stub}.{infile_type}
- time: {(time() - t0) / 60:.2f} minutes
"""
print(_mywrap(msg))
if parquet_engine == 'pyarrow':
writer.close()
print('Wrote to .parquet:\n\tAll groups')
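# Hedged usage sketch (hypothetical helper and file paths, not called anywhere): converts a
# single Stata file to parquet, renaming one column on the way in.
def _example_convert_file():
    convert_file(
        infile='/tmp/example.dta',
        outfile='/tmp/example.parquet',
        rename_dict={'BENE_ID': 'bene_id'},
        parquet_engine='fastparquet')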
def _convert_dates(df, datecols):
for col in datecols:
if not pd.core.dtypes.common.is_datetimelike(df.iloc[:, col]):
if df[col].dtype == np.number:
df.iloc[:, col] = pd.to_datetime(
df.iloc[:, col],
unit='D',
origin=pd.Timestamp('1960-01-01'),
errors='coerce')
elif df[col].dtype == 'object':
                df.loc[:, col] = pd.to_datetime(
                    df.loc[:, col], format='%Y-%m-%d', errors='coerce')
return df
def _scan_file(
infile: Union[str, Path],
categorical: bool = True,
chunksize: int = 100000,
cat_threshold: float = 0.1,
unsigned: bool = False) -> Dict[str, Any]:
"""Scan dta file to find minimal dtypes to hold data in
For each of the chunks of df:
for string columns: hold all unique values if I want them categorical
for float columns: do nothing
for integer columns: search for missings, highest and lowest value
for date columns: nothing
Args:
infile: dta file to scan
categorical: whether to change strings to categorical
chunksize: number of rows of infile to read at a time
cat_threshold: maximum fraction of unique values in order
to convert to categorical
Returns:
dictionary with variable names and dtyplist
"""
itr = pd.read_stata(infile, iterator=True)
varlist_df = pd.DataFrame({
'format': itr.fmtlist,
'name': itr.varlist,
'col_size': itr.col_sizes,
'dtype': itr.dtyplist,
'label': list(itr.variable_labels().values())})
start_cols = {}
date_fmts = ('%tc', '%tC', '%td', '%d', '%tw', '%tm', '%tq', '%th', '%ty')
date_cols = varlist_df['format'].apply(lambda x: x.startswith(date_fmts))
date_cols = varlist_df[date_cols]['name'].values.tolist()
start_cols['date_cols'] = date_cols
int_cols = varlist_df['dtype'].apply(
lambda x: np.issubdtype(x, np.integer) if inspect.isclass(x) else False)
int_cols = varlist_df[int_cols]['name'].values.tolist()
int_cols = sorted(list(set(int_cols) - set(date_cols)))
start_cols['int_cols'] = int_cols
regex = r'%.+s'
str_cols = varlist_df['format'].apply(lambda x: bool(re.search(regex, x)))
str_cols = varlist_df[str_cols]['name'].values.tolist()
start_cols['str_cols'] = str_cols
float_cols = varlist_df['dtype'].apply(
lambda x: np.issubdtype(x, np.floating) if inspect.isclass(x) else False
)
float_cols = varlist_df[float_cols]['name'].values.tolist()
start_cols['float_cols'] = float_cols
end_cols = {
'date_cols': start_cols['date_cols'],
'int_cols': {
'names': start_cols['int_cols'],
'min': {key: None
for key in start_cols['int_cols']},
'max': {key: None
for key in start_cols['int_cols']}},
'float_cols': start_cols['float_cols']}
if categorical:
end_cols['cat_cols'] = {
'names': start_cols['str_cols'],
'cats': {key: set()
for key in start_cols['str_cols']}}
end_cols['str_cols'] = []
else:
end_cols['cat_cols'] = {}
end_cols['str_cols'] = start_cols['str_cols']
tokeep = []
tokeep.extend(start_cols['int_cols'])
if categorical:
tokeep.extend(start_cols['str_cols'])
itr = | pd.read_stata(infile, columns=tokeep, chunksize=chunksize) | pandas.read_stata |
# spikein_utils.py
# Single Cell Sequencing Quality Assessment: scqua
#
# Copyright 2018 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import pandas as pd
from glob import iglob
import click
from sklearn.linear_model import LogisticRegression
import numpy as np
import scipy
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import matplotlib
def get_ERCC():
ercc = pd.read_table('https://raw.githubusercontent.com/Teichlab/readquant/master/readquant/ERCC.tsv', index_col=1)
ercc = np.log(ercc['concentration in Mix 1 (attomoles/ul)'])
return(ercc)
def get_SIRV():
sirv = pd.read_csv('https://raw.githubusercontent.com/chichaumiau/SIRV_E2/master/SIRV_concentration.csv', index_col=1)
sirv = np.log(sirv['E2 molarity [fmoles/µl]']*1000)
return(sirv)
def get_detection_limit(spike, quant, det_threshold=0.1):
X = spike[:, None]
y = quant[spike.index] >= det_threshold
if y.sum() < 8:
return np.inf
lr = LogisticRegression(solver='liblinear', fit_intercept=True)
lr.fit(X, y)
midpoint = -lr.intercept_ / lr.coef_[0]
return np.exp(midpoint[0])
def get_accuracy(ercc, quant, det_threshold=0.1):
y = np.log(quant[ercc.index]) \
.replace([np.inf, -np.inf], np.nan) \
.dropna()
if (y >= np.log(det_threshold)).sum() < 8:
return -np.inf
correlation = y.corr(ercc, method='pearson')
return correlation
def get_phn(cts_file,tpm_file,phn_file, ercc, sirv, spike):
cts = pd.read_csv(cts_file, index_col=0)
tpm = | pd.read_csv(tpm_file, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
import pandas as pd
class JPY(object):
"""docstring for JPY"""
def __init__(self, usd_filename, btc_filename, bch_filename):
usd = pd.read_csv(usd_filename, parse_dates=['snapped_at'])
btc = pd.read_csv(btc_filename, parse_dates=['snapped_at'])
bch = pd.read_csv(bch_filename, parse_dates=['snapped_at'])
self.jpy = pd.merge( | pd.merge(btc, bch, how='left') | pandas.merge |
from Task1 import *
from Task3 import *
from Task5 import *
from Task6 import *
import csv
import pandas as pd
from pandas import read_csv
from sympy import *
import sqlalchemy
from sqlalchemy.orm import sessionmaker
#Write a script that reads the ID3v1 tags of all mp3 files in a given directory and prints information about each file in the
#form: [artist name] - [track title] - [album title]. If the user passes the -d switch when invoking the script, also print a
#hex dump of the tag for each file. The script must also automatically fill in the track numbers and the genre (the genre number
#is passed as a command-line parameter) if they are not set. Use the struct module. ID3v1 headers occupy the last 128 bytes of an mp3 file (a minimal struct sketch is given below).
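# Minimal struct-based sketch of the ID3v1 layout described above (hypothetical helper,
# independent of the Task1 implementation); fields are returned as raw bytes:
def _read_id3v1_sketch(path):
    import struct
    with open(path, 'rb') as f:
        f.seek(-128, 2)
        tag, title, artist, album, year, comment, genre = struct.unpack('3s30s30s30s4s30sB', f.read(128))
    return artist, title, album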
def task1():
Task1().decoding()
task1()
#Complete the task using SQLAlchemy, including creating and editing the tables, and running queries such as:
#- print the surnames of all authors born between years X and Y (set the numbers X and Y programmatically; a hedged sketch is given below);
#- print all books written by authors from Russia;
#- print all books with more than N pages;
#- print all authors with more than N books.
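# Hedged sketch of the first query above, assuming a hypothetical Author model with
# `surname` and `birth_year` columns (the real models live in Task3 and are not shown here):
#   x, y = 1800, 1900
#   authors = sessia.query(Author.surname).filter(Author.birth_year.between(x, y)).all()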
def task3():
engine = sqlalchemy.create_engine('sqlite:///Library.db', echo=False)
Session = sessionmaker(bind=engine)
sessia = Session()
print_range(sessia)
print_russians(sessia)
print_pages(sessia)
print_authors(sessia)
sessia.close()
task3()
#Write an application for downloading files from the internet. The main window must contain three text fields for entering the
#URL of a file to download; below each text field there must be a download progress indicator and, next to it, a field showing the
#download percentage of that file. It must be possible to download one to three files in parallel (using threads is mandatory;
#files must be downloaded in 4 KB chunks). The download is started by pressing the "Start downloading!" button. Once the last
#file has finished downloading, a window must appear with a bar chart of each file's download time in the format "2s 322ms"
#and the file sizes (use the matplotlib library).
def task5():
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtWidgets.QApplication(sys.argv)
window = Task5()
window.show()
app.exec_()
task5()
#Using the numPy module, implement the following operations:
#1) multiplication of arbitrary matrices A (of size 3x5) and B (5x2);
#2) multiplication of a (5x3) matrix by a three-dimensional vector;
#3) solving an arbitrary system of linear equations;
#4) computing the determinant of a matrix;
#5) obtaining the inverse and the transposed matrices.
#Also demonstrate, using a 5x5 matrix, that the determinant equals the product of the matrix's eigenvalues (a minimal sketch of this check is given below).
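# Minimal sketch of the determinant/eigenvalue check mentioned above (hypothetical helper,
# independent of the Task6 implementation):
def _det_eigen_check_sketch():
    import numpy as np
    m = np.random.rand(5, 5)
    return np.isclose(np.linalg.det(m), np.prod(np.linalg.eigvals(m)).real)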
def task6():
matrix_multiplication()
vector_multiplication()
linear_equation()
det_matrix()
inverse_matrix()
transposed_matrix()
task6()
#Choose an arbitrary differentiable and integrable function of one variable. Using the symPy module, find and
#display its derivative and integral in analytical and graphical form. Write code for solving an arbitrary
#nonlinear equation and a system of nonlinear equations.
def solution(*equations):
if len(equations) == 1:
return solve(equations[0])
return solve_poly_system(equations)
def task7():
z = Symbol('z')
fun = z + 2
print('Заданная функция: ', str(fun))
print('Производная функции: ', end='')
derivative = diff(fun)
pprint(derivative)
plot(derivative)
print('\nИнтеграл функции: ', end='')
integral = integrate(fun)
pprint(integral)
plot(integral)
x, y = symbols('x y')
eq1 = Equality(0, x - 2*y)
eq2 = Equality(0, y-3)
eq3 = Equality(12, 2*x)
print('\nСистема уравнений:')
pprint(eq1)
pprint(eq2)
print('\nОтвет: ', end='')
pprint(solution(eq1, eq2))
print('\nУравнение:')
pprint(eq3)
print('\nОтвет: ', end='')
pprint(solution(eq3))
task7()
#Using the pandas module, display:
#1) the 10 smallest and the 10 largest countries in the world by area (a sketch is given below);
#2) the 10 smallest and the 10 largest countries in the world by population;
#3) all French-speaking countries of the world;
#4) island states only;
#5) all countries located in the southern hemisphere.
#Group the countries by first letter; by population; by area.
#Programmatically save all countries to an Excel table with selected information: name, capital, population, area, currency, latitude, longitude.
file_countries = 'countries.csv'
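# Hedged sketch for item 1 of the task above, reusing the same countries.csv and its 'area'
# column (assumed numeric) that save_table() below also relies on (hypothetical helper):
def _smallest_largest_by_area_sketch():
    countries = pd.read_csv(file_countries)
    return countries.nsmallest(10, 'area'), countries.nlargest(10, 'area')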
def save_table(table):
titles = pd.Series([i.split(',')[0] for i in table.name])
titles.name = 'name'
lat, lng = zip(*[i.split(',')
if isinstance(i, str)
else ['nan', 'nan']
for i in table.latlng])
lat, lng = map(pd.Series, (lat, lng))
lat.name = 'latitude'
lng.name = 'longitude'
output_data = | pd.concat([titles, table[['capital', 'ccn3', 'area', 'currencies']], lat, lng], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
import time # count clock time
import psutil # access the number of CPUs
import pyomo.environ as pyo
from pyomo.environ import Set, Var, Binary, NonNegativeReals, RealSet, Constraint, ConcreteModel, Objective, minimize, Suffix, DataPortal
from pyomo.opt import SolverFactory
from collections import defaultdict
StartTime = time.time()
ModelName = 'openTEPES'
CaseName = 'SEP2030oE' # To select the case
#%% IAMC -> openTEPES: Process
# 1) Loading dictionary
# 2) Reading data
# 3) Power Demand data transformation
# 4) Power System data transformation
# 4) Power Generation data transformation
# 4) Power Transmission data transformation
# 5) Writing data
#%% Loading the dictionary
var_PowerSystem = pd.read_csv('oT_IAMC_var_ID_PowerSystem.csv', index_col=[0 ])
var_PowerTransmission = pd.read_csv('oT_IAMC_var_ID_PowerTransmission.csv', index_col=[0 ])
var_PowerGeneration = pd.read_csv('oT_IAMC_var_ID_PowerGeneration.csv', index_col=[0 ])
print('Dictionary status: OK')
#%% reading data from CSV
dfOption = pd.read_csv(CaseName+'/oT_Data_Option_' +CaseName+'.csv', index_col=[0 ])
dfParameter = pd.read_csv(CaseName+'/oT_Data_Parameter_' +CaseName+'.csv', index_col=[0 ])
dfDuration = pd.read_csv(CaseName+'/oT_Data_Duration_' +CaseName+'.csv', index_col=[0] )
dfScenario = pd.read_csv(CaseName+'/oT_Data_Scenario_' +CaseName+'.csv', index_col=[0 ])
dfNodeLocation = pd.read_csv(CaseName+'/oT_Data_NodeLocation_' +CaseName+'.csv', index_col=[0 ])
dfDuration = pd.read_csv(CaseName+'/oT_Data_Duration_' +CaseName+'.csv', index_col=[0 ])
dfDemand = pd.read_csv(CaseName+'/oT_Data_Demand_' +CaseName+'.csv', index_col=[0,1,2])
dfDwOperatingReserve = pd.read_csv(CaseName+'/oT_Data_DownwardOperatingReserve_' +CaseName+'.csv', index_col=[0,1,2])
dfUpOperatingReserve = | pd.read_csv(CaseName+'/oT_Data_UpwardOperatingReserve_' +CaseName+'.csv', index_col=[0,1,2]) | pandas.read_csv |
import pandas as pd
import string
import numpy as np
import pkg_resources
import seaborn as sns
from PIL import Image
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from pdfminer.high_level import extract_text
from tqdm import tqdm
import os
class wording:
def __init__(self):
self.resource_package = __name__
self.file = '/'.join(('config', 'lematizer.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.df_lema = self.load_lema()
self.file = '/'.join(('config', 'stopwords.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.df_stopwords = self.load_stopwords()
self.file = '/'.join(('config', 'positive.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.positive_words = self.load_positive_words()
self.file = '/'.join(('config', 'negative.csv'))
self.file_path = pkg_resources.resource_filename(self.resource_package, self.file)
self.negative_words = self.load_negative_words()
self.file_cw = '/'.join(('config', 'class_words.csv'))
self.file_path_cw = pkg_resources.resource_filename(self.resource_package, self.file_cw)
self.df_wc = self.load_class_words()
self.file_nomes = '/'.join(('config', 'nomes.csv'))
self.file_path_nomes = pkg_resources.resource_filename(self.resource_package, self.file_nomes)
self.nomes_pessoas = self.load_nomes()
self.file_cidades = '/'.join(('config', 'cidades.csv'))
self.file_path_cidades = pkg_resources.resource_filename(self.resource_package, self.file_cidades)
self.cidades = self.load_cidades()
self.file_estados = '/'.join(('config', 'estados.csv'))
self.file_path_estados = pkg_resources.resource_filename(self.resource_package, self.file_estados)
self.estados = self.load_estados()
self.tfidf = pd.DataFrame()
self.colection = pd.DataFrame()
def load_file(self, file='none', type='txt', header=False, sep=',', column='None'):
if file == 'none':
raise ValueError('No Filename was provided, need one')
if type == 'excel':
df = pd.read_excel(file)
if column != 'None':
                df = df[[column]].rename(columns={column: 'word'})
else:
raise TypeError("An xlsx file column was not selected")
if type == 'csv':
if header:
header=0
else:
header=None
df = pd.read_csv(file, header=header, sep=sep)
if column != 'None':
df = pd.DataFrame({'word': df[column]})
else:
raise TypeError("An csv file column was not selected")
if type == 'txt':
f = open(file, "r", encoding='utf8', errors='ignore')
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
if type == 'pdf' :
df = self.load_pdf(file)
df = pd.DataFrame([df])
df.columns = ['word']
self.colection = df.copy()
def load_lema(self):
df_lema = pd.read_csv(self.file_path, sep=',')
df_lema.columns = ['word','lema']
return(df_lema)
def load_positive_words(self):
df_pw = pd.read_csv(self.file_path)
df_pw.columns = ['word']
return(df_pw)
def load_negative_words(self):
df_pw = pd.read_csv(self.file_path)
df_pw.columns = ['word']
return(df_pw)
def load_stopwords(self):
df_sw = pd.read_csv(self.file_path, sep=';', header=None)
df_sw.columns = ['stopword']
return(df_sw)
def load_nomes(self):
df_nome = pd.read_csv(self.file_path_nomes, sep=';')
return(df_nome)
def load_cidades(self):
df_cidades = pd.read_csv(self.file_path_cidades, sep=';')
return(df_cidades)
def load_estados(self):
df_estados = pd.read_csv(self.file_path_estados, sep=';')
return(df_estados)
def del_stopwords(self, text, stopwords=True):
output = list()
text = self.del_punck(text)
text = text.lower()
for word in text.split(' '):
if stopwords:
if len(word) > 3:
result = ''.join([str(x) for x in self.df_stopwords[self.df_stopwords['stopword'] == word]['stopword']])
if len(result) == 0:
output.append(word)
else:
output.append(word)
return(output)
def del_punck(self, text):
punck = ",.;/<>:?[]{}+_)(*&$#@!)1234567890\n\t\r"
for c in punck:
text = text.replace(c,'')
text = text.replace('"', '')
return(text)
def get_lema(self, text, lemmatizer=True):
output = list()
for word in text:
if lemmatizer:
w_lema = ''.join([self.df_lema[self.df_lema['lema'] == word]['word'].unique()][0])
if len(w_lema) == 0:
output.append(word)
else:
output.append(w_lema)
else:
output.append(word)
return(output)
def build_tf(self, df, stopwords=True, lemmatizer=True, silence=False):
frame_tfidf = pd.DataFrame()
if silence:
for i in range(df.shape[0]):
frame_aux = pd.DataFrame()
line = ''.join(df.loc[i])
text = self.del_stopwords(line, stopwords=stopwords)
text = self.get_lema(text, lemmatizer=lemmatizer)
frame_aux['word'] = text
frame_aux['doc'] = 'doc-' + str(i)
frame_tfidf = frame_tfidf.append(frame_aux)
else:
for i in tqdm(range(df.shape[0])):
frame_aux = pd.DataFrame()
line = ''.join(df.loc[i])
text = self.del_stopwords(line, stopwords=stopwords)
text = self.get_lema(text, lemmatizer=lemmatizer)
frame_aux['word'] = text
frame_aux['doc'] = 'doc-' + str(i)
frame_tfidf = frame_tfidf.append(frame_aux)
frame_tfidf['count'] = 1
return(frame_tfidf[['doc','word','count']])
def build_tf_idf(self, stopwords=True, lemmatizer=True, silence=False):
df = self.colection.copy()
f = self.build_tf(df, stopwords=stopwords, lemmatizer=lemmatizer, silence=silence)
n = df.shape[0]
f = f.groupby(by=['doc','word']).count().reset_index()
f.rename(columns={'count':'f'},inplace=True)
f['tf'] = 1+ np.log2(f['f'])
f['idf'] = 0
idf = f.groupby(by=['word']).count().reset_index()[['word','tf']]
idf.rename(columns={'tf':'idf'}, inplace=True)
idf['log'] = np.log2(1+ (n/idf['idf']))
if silence:
for i in range(f.shape[0]):
w = ''.join(f.loc[i:i,'word'])
f.loc[i:i,'idf'] = float(idf[idf['word'] == w]['log'])
else:
for i in tqdm(range(f.shape[0])):
w = ''.join(f.loc[i:i,'word'])
f.loc[i:i,'idf'] = float(idf[idf['word'] == w]['log'])
f['tf_idf'] = f['tf'] * f['idf']
self.tfidf = f.copy()
self.set_sign()
def set_sign(self):
self.tfidf['sign'] = ''
for i in range(self.tfidf.shape[0]):
word = self.tfidf.loc[i,'word']
p = self.positive_words[self.positive_words['word'] == word]
n = self.negative_words[self.negative_words['word'] == word]
if len(p) == 0 and len(n) > 0:
self.tfidf.loc[i,'sign'] = 'negative'
elif len(p) == 0 and len(n) == 0:
self.tfidf.loc[i,'sign'] = 'neutral'
elif len(p) > 0 and len(n) == 0:
self.tfidf.loc[i,'sign'] = 'positive'
elif len(p) > 0 and len(n) > 0:
self.tfidf.loc[i,'sign'] = 'ambiguous'
def sentimental_graf(self, rotate=False):
bar = pd.DataFrame(self.tfidf['sign'].value_counts()).reset_index()
bar.columns = ['Sentimental','frequency']
if rotate:
img = sns.barplot(y='Sentimental', x='frequency', data=bar)
else:
img = sns.barplot(x='Sentimental', y='frequency', data=bar)
return(img)
def sentimental_table(self):
bar = pd.DataFrame(self.tfidf['sign'].value_counts()).reset_index()
bar.columns = ['Sentimental','frequency']
return(bar)
def word_cloud(self, picture='none'):
resource_package = __name__
file = '/'.join(('config', 'cloud.jpeg'))
file_path = pkg_resources.resource_filename(resource_package, file)
if picture == 'none':
mask = np.array(Image.open(file_path))
else:
mask = np.array(Image.open(picture))
tuples = [tuple(x) for x in self.tfidf[['word','tf_idf']].values]
wc = WordCloud(background_color="white", max_words=1000, mask=mask).generate_from_frequencies(frequencies=dict(tuples))
plt.figure(figsize=(15,15))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
return(plt)
def look2word(self, wsearch):
output = pd.DataFrame({'index': [],'word': []})
for i in range(self.colection.shape[0]):
line = self.del_punck(self.colection.loc[i,'word'])
for word in line.split(' '):
if word == wsearch:
output = output.append(pd.DataFrame({'index':[int(i)],'word':[line]}))
break
output['index'] = output['index'].apply(lambda x: int(x))
output = output.set_index('index')
return(output)
def load_class_words(self):
df_lema = pd.read_csv(self.file_path_cw, sep=';')
return(df_lema)
def set_class(self, word='none', wclass='none', force=False):
word = word.lower()
wclass = wclass.lower()
exist = ''.join(self.df_wc[self.df_wc['word'] == word]['class'])
save = False
if exist == '-':
self.df_wc.loc[self.df_wc['word'] == word,'class'] = wclass
save = True
elif force:
self.df_wc.loc[self.df_wc['word'] == word,'class'] = wclass
save = True
else:
print('Word ' + word + ' has a class named ' + wclass + ' you must use force=True to change the class')
if save:
self.df_wc.to_csv(self.file_path_cw, sep=';', index=False)
def get_class(self, word='none'):
word = word.lower()
return(''.join(self.df_wc[self.df_wc['word'] == word]['class']))
def load_pdf(self, file, silence=False):
if not silence:
print('Reading PDF file ' + file)
text = extract_text(file)
text_line = text.split('\n')
doc = ''
if silence:
for line in text_line:
if len(line) > 0:
doc = doc + line + ' '
else:
for line in tqdm(text_line):
if len(line) > 0:
doc = doc + line + ' '
return(doc)
def find_cities(self, city):
result = int(self.colection['word'].str.find(city))
return('Substring ' + city + ' found at index: ' + str(result))
def load_colection(self, dir, type='pdf', silence=False):
files = [x for x in os.listdir(dir) if x.endswith("." + type)]
if len(files) == 0:
raise TypeError("File type " + type + " not found")
if silence:
for file in files:
if type == 'pdf':
df = self.load_pdf(os.path.join(dir, file),silence=silence)
elif type == 'txt':
f = open(file, "r")
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
f.close()
else:
raise TypeError("File type " + type + " not permited")
df = pd.DataFrame([df])
df.columns = ['word']
self.colection = self.colection.append(df)
else:
for file in tqdm(files):
if type == 'pdf':
df = self.load_pdf(os.path.join(dir, file),silence=silence)
elif type == 'txt':
f = open(file, "r")
df = f.read()
df = pd.DataFrame(df.split('\n'))
df.columns = ['word']
f.close()
else:
raise TypeError("File type " + type + " not permited")
df = pd.DataFrame([df])
df.columns = ['word']
self.colection = self.colection.append(df)
def load_class_colection(self, dir='none', silence=False):
if dir == 'none':
raise TypeError("Directory not valid")
classes = [x for x in os.listdir(dir)]
if len(classes) == 0:
raise TypeError("Directory of classes is empty")
self.colection = | pd.DataFrame() | pandas.DataFrame |
"""
Программа создает файлы-исходники в папке it\Иван\ИВАН\НовыйАвтомат(НК/ВБ/МАЙ)\Исходники(НК/ВБ/МАЙ)CRM , необходимые для автоматов Ивана.
Логика основанана на сверке данных из JSON застройщика, где содержатся свободные квартиры, и "Эталонных выгрузок", в которых содержатся данные по всем квартирам вообще
При выводе новой секции/корпуса необходимо внести данные в эталонные выгрузки.
Для котроля работы можно раскомментировать закомментированные строки кода, которые создают промежуточные .csv таблицы в директории проекта
Точка роста - ликивдация необходимости ссылаться на Эталонные выгрузки, как вариант ссылаться на автомат Ивана.
"""
import requests
import json
import pandas as pd
def get_json():
    url = 'http://incrm.ru/export-tred/ExportToSite.svc/ExportToTf/json' # address of the JSON export from the developer's CRM
    r = requests.get(url) # request the address
    json_data=json.loads(r.text) # parse the exported text
data_frame = pd.DataFrame.from_records(json_data,columns = ["ArticleID", "Article", "Number", "StatusCode", "StatusCodeName", "Quantity", "Rooms", "Sum",
"Finishing", "Decoration", "SeparateEntrance","RoofExit","2level","TerrasesCount"]) #загружаем выгрузку в DataFrame
    return data_frame # return the export as a DataFrame
def maintain_df(data_frame,zhk):
data_frame = data_frame.rename(
columns={'Article': 'Код объекта', 'Number': 'Номер квартиры', 'StatusCodeName': 'Статус',
'Quantity': 'Площадь',
        'Sum': 'Цена', 'Decoration': 'Отделка'}) # rename the DataFrame columns to their Russian names
data_frame = data_frame.drop(
columns=['ArticleID', 'StatusCode', 'Finishing', 'SeparateEntrance', 'RoofExit', '2level',
        'TerrasesCount']) # drop the unneeded columns
data_frame['Rooms'].replace({'0': 'СТ', '1': '1К', '2': '2К', '3': '3К', '4': '4К'}, inplace=True)
data_frame['Цена'] = data_frame['Цена'].astype(float)
    data_frame['Площадь'] = data_frame['Площадь'].astype(float) # convert the data to decimal (float) format
    data_frame['Цена за метр'] = data_frame['Цена'] / data_frame['Площадь'] # compute the price per square metre
data_frame = data_frame.replace(
{'без отделки': 0, 'б/о с перегородками': 0, 'НЕКОНДИЦИЯ': 0, 'Классика': 2, 'МОДЕРН': 2, 'СОЧИ': 2,
'Финишная отделка': 2, 'ч/о без перегородок': 1, 'черновая': 1, 'чистовая': 2, 'чистовая (светлая)': 2,
'чистовая (темная)': 2, 'ЯЛТА': 2, 'Без отделки':0, 'Модерн':2, 'Сочи':2, 'Ялта':2, 'Чистовая':2, 'Черновая':1,
'без отделки (old)':0, 'Венеция':2,'венеция':2,'ВЕНЕЦИЯ':2})
data_frame = data_frame.assign(service=data_frame['Код объекта'].str.get(0))
data_frame = data_frame[data_frame['service'] == zhk]
#data_frame.to_csv(zhk + 'МАЙ.csv', sep=';', encoding='cp1251', decimal=',', float_format='%.2f', index=False)
data_frame = data_frame[data_frame['Код объекта'].apply(lambda x : 'F' not in x)]
data_frame.to_csv('DF' + '.csv', sep=';', encoding='cp1251', decimal=',', float_format='%.2f', index=False)
for i in range(len(data_frame)):
if (data_frame.iloc[i, data_frame.columns.get_loc('service')] == 'Н'):
try:
data_frame.iloc[i, data_frame.columns.get_loc('Код объекта')] = 'Новокрасково, корп. %d, кв.%d' % (int(data_frame.iloc[i, 0][3:4]), int(data_frame.iloc[i, 0][14:]))
except:
pass
elif (data_frame.iloc[i, data_frame.columns.get_loc('service')] == 'В'):
if (data_frame.iloc[i, 0][:6] == 'ВБ-5,1' or data_frame.iloc[i, 0][:6] == 'ВБ-5,2'):
data_frame.iloc[i, 0] = 'Видный берег, корп. 5, кв.%d' % (int(data_frame.iloc[i, 0][16:]))
else:
data_frame.iloc[i, 0] = 'Видный берег, корп. %d, кв.%d' % (int(data_frame.iloc[i, 0][4:5]), int(data_frame.iloc[i, 0][15:]))
elif (data_frame.iloc[i, data_frame.columns.get_loc('service')] == 'М'):
if (data_frame.iloc[i, 0][4:6] == '01' or data_frame.iloc[i, 0][4:6] == '02' or data_frame.iloc[i, 0][4:6] == '05' or data_frame.iloc[i, 0][
4:6] == '06' or
data_frame.iloc[i, 0][4:6] == '09'):
data_frame.iloc[i, 0] = 'Май, корп. %d, кв.%d' % (int(data_frame.iloc[i, 0][4:6]), int(data_frame.iloc[i, 0][16:]))
else:
data_frame.iloc[i, 0] = 'МАЙ, корп. %d, кв.%d' % (int(data_frame.iloc[i, 0][4:6]), int(data_frame.iloc[i, 0][16:]))
#data_frame.to_csv(zhk + '.csv', sep=';', encoding='cp1251', decimal=',', float_format='%.2f', index=False)
return data_frame
def merge(data_frame, file):
path = 'C:\\Python\\for_automat\\'
prev_df = | pd.read_csv(path+'_'+file+'.csv',sep=';', encoding='cp1251',engine='python', index_col=False) | pandas.read_csv |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for schemas."""
# pytype: skip-file
from __future__ import absolute_import
import typing
import unittest
import future.tests.base # pylint: disable=unused-import
import numpy as np
# patches unittest.testcase to be python3 compatible
import pandas as pd
from parameterized import parameterized
from past.builtins import unicode
import apache_beam as beam
from apache_beam.coders import RowCoder
from apache_beam.coders.typecoders import registry as coders_registry
from apache_beam.dataframe import schemas
from apache_beam.dataframe import transforms
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
Simple = typing.NamedTuple(
'Simple', [('name', unicode), ('id', int), ('height', float)])
coders_registry.register_coder(Simple, RowCoder)
Animal = typing.NamedTuple(
'Animal', [('animal', unicode), ('max_speed', typing.Optional[float])])
coders_registry.register_coder(Animal, RowCoder)
def matches_df(expected):
def check_df_pcoll_equal(actual):
actual = pd.concat(actual)
sorted_actual = actual.sort_values(by=list(actual.columns)).reset_index(
drop=True)
sorted_expected = expected.sort_values(
by=list(expected.columns)).reset_index(drop=True)
if not sorted_actual.equals(sorted_expected):
raise AssertionError(
'Dataframes not equal: \n\nActual:\n%s\n\nExpected:\n%s' %
(sorted_actual, sorted_expected))
return check_df_pcoll_equal
# Test data for all supported types that can be easily tested.
# Excludes bytes because it's difficult to create a series and dataframe bytes
# dtype. For example:
# pd.Series([b'abc'], dtype=bytes).dtype != 'S'
# pd.Series([b'abc'], dtype=bytes).astype(bytes).dtype == 'S'
COLUMNS = [
([375, 24, 0, 10, 16], np.int32, 'i32'),
([375, 24, 0, 10, 16], np.int64, 'i64'),
([375, 24, None, 10, 16], pd.Int32Dtype(), 'i32_nullable'),
([375, 24, None, 10, 16], pd.Int64Dtype(), 'i64_nullable'),
([375., 24., None, 10., 16.], np.float64, 'f64'),
([375., 24., None, 10., 16.], np.float32, 'f32'),
([True, False, True, True, False], np.bool, 'bool'),
(['Falcon', 'Ostrich', None, 3.14, 0], np.object, 'any'),
([True, False, True, None, False], pd.BooleanDtype(), 'bool_nullable'),
(['Falcon', 'Ostrich', None, 'Aardvark', 'Elephant'],
pd.StringDtype(),
'strdtype'),
] # type: typing.List[typing.Tuple[typing.List[typing.Any], typing.Any, str]]
NICE_TYPES_DF = pd.DataFrame(columns=[name for _, _, name in COLUMNS])
for arr, dtype, name in COLUMNS:
NICE_TYPES_DF[name] = pd.Series(arr, dtype=dtype, name=name).astype(dtype)
NICE_TYPES_PROXY = NICE_TYPES_DF[:0]
SERIES_TESTS = [( | pd.Series(arr, dtype=dtype, name=name) | pandas.Series |
# -*- coding: utf-8 -*-
import pytest
import pandas as pd
from numpy import nan, float64
from jqfactor_analyzer.prepare import get_clean_factor_and_forward_returns
from jqfactor_analyzer.performance import (
factor_information_coefficient,
factor_autocorrelation,
mean_information_coefficient,
quantile_turnover,
factor_returns, factor_alpha_beta,
average_cumulative_return_by_quantile
)
from jqfactor_analyzer.utils import get_forward_returns_columns
dr = pd.date_range(start='2015-1-1', end='2015-1-2')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=[[1, 2, 3, 4],
[4, 3, 2, 1]]).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor.name = 'factor'
factor_data = pd.DataFrame()
factor_data['factor'] = factor
factor_data['group'] = pd.Series(index=factor.index,
data=[1, 1, 2, 2, 1, 1, 2, 2],)
factor_data['weights'] = pd.Series(range(8), index=factor.index,
dtype=float64) + 1
@pytest.mark.parametrize(
('factor_data', 'forward_returns', 'group_adjust',
'by_group', 'expected_ix', 'expected_ic_val'),
[(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, dr, [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], True, True,
pd.MultiIndex.from_product([dr, [1, 2]], names=['date', 'group']),
[1., 1., 1., 1.])]
)
def test_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = factor_information_coefficient(factor_data=factor_data,
group_adjust=group_adjust,
by_group=by_group)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1'], dtype='object'),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df)
@pytest.mark.parametrize(
(
'factor_data', 'forward_returns', 'group_adjust',
'by_group', 'by_time', 'expected_ix', 'expected_ic_val'
), [
(factor_data, [4, 3, 2, 1, 1, 2, 3, 4], False, False, 'D',
dr, [-1., -1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, False, 'W',
pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'), [1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, None,
pd.Int64Index([1, 2], name='group'), [1., 1.]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], False, True, 'W',
pd.MultiIndex.from_product(
[pd.DatetimeIndex(['2015-01-04'], name='date', freq='W-SUN'),
[1, 2]],
names=['date', 'group']
),
[1., 1.])
]
)
def test_mean_information_coefficient(factor_data,
forward_returns,
group_adjust,
by_group,
by_time,
expected_ix,
expected_ic_val):
factor_data = factor_data.copy()
factor_data['period_1'] = pd.Series(index=factor_data.index,
data=forward_returns)
ic = mean_information_coefficient(factor_data,
group_adjust=group_adjust,
by_group=by_group,
by_time=by_time)
expected_ic_df = pd.DataFrame(index=expected_ix,
columns=pd.Index(['period_1']),
data=expected_ic_val)
pd.testing.assert_frame_equal(ic, expected_ic_df,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize(
('quantile_values', 'test_quantile', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
4.0,
[nan, 1.0, 1.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
3.0,
[nan, 0.0, 0.0, 0.0]),
([[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0]],
2.0,
[nan, 1.0, 1.0, 1.0])]
)
def test_quantile_turnover(quantile_values, test_quantile,
expected_vals):
dr = pd.date_range(start='2015-1-1', end='2015-1-4')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
quantized_test_factor = pd.Series(
pd.DataFrame(index=dr, columns=tickers, data=quantile_values).stack()
)
quantized_test_factor.index = quantized_test_factor.index.set_names(
['date', 'asset']
)
to = quantile_turnover(quantized_test_factor, test_quantile)
expected = pd.Series(
index=quantized_test_factor.index.levels[0], data=expected_vals)
expected.name = test_quantile
pd.testing.assert_series_equal(to, expected)
@pytest.mark.parametrize(
('factor_data', 'factor_vals', 'fwd_return_vals',
'group_adjust', 'expected_vals'),
[(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [-1.25000, -1.25000]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
False, [0.0, 0.0]),
(factor_data, [1, 2, 3, 4, 4, 3, 2, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [-0.5, -0.5]),
(factor_data, [1, 2, 3, 4, 1, 2, 3, 4], [1, 4, 1, 2, 1, 2, 2, 1],
True, [1.0, 0.0]),
(factor_data, [1, 1, 1, 1, 1, 1, 1, 1], [4, 3, 2, 1, 1, 2, 3, 4],
True, [0.0, 0.0])]
)
def test_factor_returns(factor_data,
factor_vals,
fwd_return_vals,
group_adjust,
expected_vals):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
factor_data['factor'] = factor_vals
factor_returns_s = factor_returns(factor_data=factor_data,
demeaned=True,
group_adjust=group_adjust)
expected = pd.DataFrame(
index=dr,
data=expected_vals,
columns=get_forward_returns_columns(factor_data.columns)
)
pd.testing.assert_frame_equal(factor_returns_s, expected)
@pytest.mark.parametrize(
('factor_data', 'fwd_return_vals', 'alpha', 'beta'),
[(factor_data, [1, 2, 3, 4, 1, 1, 1, 1], -1, 5. / 6.)]
)
def test_factor_alpha_beta(factor_data, fwd_return_vals, alpha, beta):
factor_data = factor_data.copy()
factor_data['period_1'] = fwd_return_vals
ab = factor_alpha_beta(factor_data=factor_data)
expected = pd.DataFrame(columns=['period_1'],
index=['Ann. alpha', 'beta'],
data=[alpha, beta])
pd.testing.assert_frame_equal(ab, expected)
@pytest.mark.parametrize(
('factor_values', 'end_date', 'period', 'expected_vals'),
[([[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, 1.0, 1.0, 1.0]),
([[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0]],
'2015-1-4', 1,
[nan, -1.0, -1.0, -1.0]),
([[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0],
[1.0, 2.0, 3.0, 4.0],
[2.0, 1.0, 4.0, 3.0],
[2.0, 1.0, 4.0, 3.0],
[4.0, 3.0, 2.0, 1.0]],
'2015-1-12', 3,
[nan, nan, nan, 1.0, 1.0, 1.0, 0.6, -0.6, -1.0, 1.0, -0.6, -1.0])]
)
def test_factor_autocorrelation(factor_values,
end_date,
period,
expected_vals):
dr = pd.date_range(start='2015-1-1', end=end_date)
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
factor = pd.DataFrame(index=dr,
columns=tickers,
data=factor_values).stack()
factor.index = factor.index.set_names(['date', 'asset'])
factor_df = pd.DataFrame()
factor_df['factor'] = factor
fa = factor_autocorrelation(factor_df, period)
expected = pd.Series(index=dr, data=expected_vals)
expected.name = period
pd.testing.assert_series_equal(fa, expected)
@pytest.mark.parametrize(
('before', 'after', 'demeaned', 'quantiles', 'expected_vals'),
[(1, 2, False, 4,
[[1.00, 0.0, -0.50, -0.75],
[0.0, 0.0, 0.0, 0.0],
[0.00, 0.00, 0.00, 0.00],
[0.0, 0.0, 0.0, 0.0],
[-0.20, 0.0, 0.25, 0.5625],
[0.0, 0.0, 0.0, 0.0],
[-0.3333333, 0.0, 0.50, 1.25],
[0.0, 0.0, 0.0, 0.0]]),
(1, 2, True, 4,
[[0.8833333, 0.0, -0.5625, -1.015625],
[0.0, 0.0, 0.0, 0.0],
[-0.1166667, 0.0, -0.0625, -0.265625],
[0.0, 0.0, 0.0, 0.0],
[-0.3166667, 0.0, 0.1875, 0.296875],
[0.0, 0.0, 0.0, 0.0],
[-0.4500000, 0.0, 0.4375, 0.984375],
[0.0, 0.0, 0.0, 0.0]]),
(3, 0, False, 4,
[[7.0, 3.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.488, -0.36, -0.2, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-0.703704, -0.55555555, -0.333333333, 0.0],
[0.0, 0.0, 0.0, 0.0]]),
(0, 3, True, 4,
[[0.0, -0.5625, -1.015625, -1.488281],
[0.0, 0.0, 0.0, 0.0],
[0.0, -0.0625, -0.265625, -0.613281],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.1875, 0.296875, 0.339844],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.4375, 0.984375, 1.761719],
[0.0, 0.0, 0.0, 0.0]]),
(3, 3, False, 2,
[[3.5, 1.5, 0.5, 0.0, -0.25, -0.375, -0.4375],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-0.595852, -0.457778, -0.266667, 0.0, 0.375, 0.90625, 1.664062],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
(3, 3, True, 2,
[[2.047926, 0.978888, 0.383333, 0.0, -0.3125, -0.640625, -1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[-2.047926, -0.978888, -0.383333, 0.0, 0.3125, 0.640625, 1.050781],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])]
)
def test_average_cumulative_return_by_quantile(before, after,
demeaned, quantiles,
expected_vals):
dr = pd.date_range(start='2015-1-15', end='2015-2-1')
dr.name = 'date'
tickers = ['A', 'B', 'C', 'D']
r1, r2, r3, r4 = (1.25, 1.50, 1.00, 0.50)
data = [[r1**i, r2**i, r3**i, r4**i] for i in range(1, 19)]
prices = pd.DataFrame(index=dr, columns=tickers, data=data)
dr2 = pd.date_range(start='2015-1-21', end='2015-1-26')
dr2.name = 'date'
factor = pd.DataFrame(
index=dr2, columns=tickers, data=[
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1],
[3, 4, 2, 1]]).stack()
factor_data = get_clean_factor_and_forward_returns(
factor, prices, quantiles=quantiles, periods=range(0, after + 1)
)
avgrt = average_cumulative_return_by_quantile(
factor_data, prices, before, after, demeaned)
arrays = []
for q in range(1, quantiles + 1):
arrays.append((q, 'mean'))
arrays.append((q, 'std'))
index = pd.MultiIndex.from_tuples(arrays, names=['factor_quantile', None])
expected = pd.DataFrame(
index=index, columns=range(-before, after + 1), data=expected_vals)
| pd.testing.assert_frame_equal(avgrt, expected) | pandas.testing.assert_frame_equal |
"""Provides helper functions for reading input data and configuration files.
The default configuration values are provided in aneris.RC_DEFAULTS.
"""
from collections import abc
import os
import yaml
import pandas as pd
from aneris.utils import isstr, isnum, iamc_idx
RC_DEFAULTS = """
config:
default_luc_method: reduce_ratio_2150_cov
default_ratio_method: reduce_ratio_2080
default_offset_method: reduce_offset_2080
cov_threshold: 20
harmonize_year: 2015
global_harmonization_only: false
replace_suffix: Harmonized-DB
prefix: CEDS+|9+ Sectors
suffix: Unharmonized
add_5regions: true
"""
def _read_data(indfs):
datakeys = sorted([x for x in indfs if x.startswith('data')])
df = pd.concat([indfs[k] for k in datakeys])
# don't know why reading from excel changes dtype and column types
# but I have to reset them manually
df.columns = df.columns.astype(str)
numcols = [x for x in df.columns if isnum(x)]
df[numcols] = df[numcols].astype(float)
# some teams also don't provide standardized column names and styles
df.columns = df.columns.str.capitalize()
return df
def _recursive_update(d, u):
for k, v in u.items():
if isinstance(v, abc.Mapping):
r = _recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
def pd_read(f, str_cols=False, *args, **kwargs):
"""Try to read a file with pandas, supports CSV and XLSX
Parameters
----------
f : string
the file to read in
str_cols : bool, optional
turn all columns into strings (numerical column names are sometimes
read in as numerical dtypes)
args, kwargs : sent directly to the Pandas read function
Returns
-------
df : pd.DataFrame
"""
if f.endswith('csv'):
df = pd.read_csv(f, *args, **kwargs)
else:
df = pd.read_excel(f, *args, **kwargs)
if str_cols:
df.columns = [str(x) for x in df.columns]
return df
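# Hedged usage sketch (hypothetical file names); extra kwargs pass straight through to the
# underlying pandas reader:
#   df = pd_read('history.csv', str_cols=True)
#   df = pd_read('history.xlsx', sheet_name='data')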
def pd_write(df, f, *args, **kwargs):
"""Try to write a file with pandas, supports CSV and XLSX"""
# guess whether to use index, unless we're told otherwise
index = kwargs.pop('index', isinstance(df.index, pd.MultiIndex))
if f.endswith('csv'):
df.to_csv(f, index=index, *args, **kwargs)
else:
writer = | pd.ExcelWriter(f, engine='xlsxwriter') | pandas.ExcelWriter |
import os
import unittest
import pandas as pd
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVC
from sklearn_pandas import DataFrameMapper
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LinearRegression,LogisticRegression
from nyoka import skl_to_pmml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('svm',SVC())
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,"svc_pmml.pmml")
self.assertEqual(os.path.isfile("svc_pmml.pmml"),True)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = | pd.DataFrame(iris.data,columns=iris.feature_names) | pandas.DataFrame |
#import AYS_Environment as ays_env
import c_global.cG_LAGTPKS_Environment as c_global
import numpy as np
import pandas as pd
import sys,os
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
pars=dict( Sigma = 1.5 * 1e8,
Cstar=5500,
a0=0.03,
aT=3.2*1e3,
l0=26.4,
lT=1.1*1e6,
delta=0.01,
m=1.5,
g=0.02,
p=0.04,
Wp=2000,
q0=20,
qP=0.,
b=5.4*1e-7,
yE=120,
wL=0.,
eB=4*1e10,
eF=4*1e10,
i=0.25,
k0=0.1,
aY=0.,
aB=1.5e4,
aF=2.7e5,
aR=9e-15,
sS=1./50.,
sR=1.,
ren_sub=.5,
carbon_tax=.5 ,
i_DG=0.1,
L0=0.3*2480
)
ics=dict( L=2480.,
A=830.0,
G=1125,
T=5.053333333333333e-6,
P=6e9,
K=5e13,
S=5e11
)
dt=1
reward_type='PB'
my_Env=c_global.cG_LAGTPKS_Environment(dt=dt,pars=pars, reward_type=reward_type, ics=ics, plot_progress=True)
def read_trajectories(learner_type, reward_type, basin, policy='epsilon_greedy', episode=0):
runs=[]
# 0_path_[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]_episode0 limit=150
limit=150
parameters=['time','L', 'A', 'G', 'T', 'P', 'K', 'S' , 'action' , 'Reward' ]
for i in range(limit):
file_name=('./'+learner_type+'/' + policy +'/' +reward_type + '/DQN_Path/' +
basin+ '/' + str(i)+'_path_[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]_episode' + str(episode)+'.txt')
if os.path.isfile(file_name):
tmp_file= pd.read_csv(file_name, sep='\s+' ,header=None, names=parameters, skiprows=2, index_col=False) # Skiprow=2, since we calculate derived variables first!
runs.append(tmp_file)
# print(file_name)
# For not too many files
if len(runs) > 100:
break
print(len(runs))
return runs
def get_LAGTPKS(learning_progress):
time=learning_progress['time']
L=learning_progress['L']
A=learning_progress['A']
G=learning_progress['G']
T=learning_progress['T']
P=learning_progress['P']
K=learning_progress['K']
S=learning_progress['S']
actions= learning_progress['action']
return time, L,A,G,T,P,K,S,actions
def management_distribution_part(learning_progress_arr, savepath, start_time=0, end_time=20, only_long_times=False):
tot_my_actions=pd.DataFrame(columns=['action'])
for learning_progress in learning_progress_arr:
time, L_comp, A_comp, G_comp, T_comp, P_comp, K_comp, S_comp, actions = get_LAGTPKS(learning_progress)
end_time_simulation=time.iloc[-1]
if only_long_times:
if end_time_simulation >100:
print(end_time_simulation)
my_actions= | pd.DataFrame(actions[start_time:end_time]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Utility functions for model generation
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import logging
import math
import numpy as np
import pandas as pd
from datetime import datetime
# -- Private Imports
# -- Globals
logger = logging.getLogger(__name__)
dict_wday_name = {
0: 'W-MON',
1: 'W-TUE',
2: 'W-WED',
3: 'W-THU',
4: 'W-FRI',
5: 'W-SAT',
6: 'W-SUN',
}
# -- Exception classes
# -- Functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def array_transpose(a):
"""
Transpose a 1-D numpy array
:param a: An array with shape (n,)
:type a: numpy.Array
:return: The original array, with shape (n,1)
:rtype: numpy.Array
"""
return a[np.newaxis, :].T
# TODO: rework to support model composition
def model_requires_scaling(model):
"""
Given a :py:class:`anticipy.forecast_models.ForecastModel`
return True if the function requires scaling a_x
:param model: A get_model_<modeltype> function from
:py:mod:`anticipy.model.periodic_models` or
:py:mod:`anticipy.model.aperiodic_models`
:type model: function
:return: True if function is logistic or sigmoidal
:rtype: bool
"""
requires_scaling = model is not None and model.name in [
'logistic',
'sigmoid'
]
return requires_scaling
def apply_a_x_scaling(a_x, model=None, scaling_factor=100.0):
"""
Modify a_x for forecast_models that require it
:param a_x: x axis of time series
:type a_x: numpy array
:param model: a :py:class:`anticipy.forecast_models.ForecastModel`
:type model: function or None
:param scaling_factor: Value used for scaling t_values for logistic models
:type scaling_factor: float
:return: a_x with scaling applied, if required
:rtype: numpy array
"""
if model_requires_scaling(model): # todo: check that this is still useful
a_x = a_x / scaling_factor
return a_x
dict_freq_units_per_year = dict(
A=1.0,
Y=1.0,
D=365.0,
W=52.0,
M=12,
Q=4,
H=24 * 365.0
)
dict_dateoffset_input = dict(
Y='years',
A='years',
M='months',
W='weeks',
D='days',
H='hours'
)
def get_normalized_x_from_date(s_date):
"""Get column of days since Monday of first date"""
date_start = s_date.iloc[0]
# Convert to Monday
date_start = date_start - pd.to_timedelta(date_start.weekday(), unit='D')
s_x = (s_date - date_start).dt.days
return s_x
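# For example (dates are assumptions): if s_date holds Wed 2020-01-08 through
# Fri 2020-01-10, the Monday of that week is 2020-01-06, so s_x is [2, 3, 4].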
def get_s_x_extrapolate(
date_start_actuals,
date_end_actuals,
model=None,
freq=None,
extrapolate_years=2.5,
scaling_factor=100.0,
x_start_actuals=0.):
"""
Return a_x series with DateTimeIndex, covering the date range for the
actuals, plus a forecast period.
:param date_start_actuals: date or numeric index for first actuals sample
:type date_start_actuals: str, datetime, int or float
:param date_end_actuals: date or numeric index for last actuals sample
:type date_end_actuals: str, datetime, int or float
:param extrapolate_years:
:type extrapolate_years: float
:param model:
:type model: function
:param freq: Time unit between samples. Supported units are 'W' for weekly
samples, or 'D' for daily samples. (untested) Any date unit or time
unit accepted by numpy should also work, see
https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.datetime.html#arrays-dtypes-dateunits # noqa
:type freq: basestring
:param scaling_factor: Value used for scaling a_x for certain model
functions
:type scaling_factor: float
:param x_start_actuals: numeric index for the first actuals sample
:type x_start_actuals: int
:return: Series of floats with DateTimeIndex. To be used as (a_date, a_x)
input for a model function.
:rtype: pandas.Series
The returned series covers the actuals time domain plus a forecast period
lasting extrapolate_years, in years.
The number of additional samples for the forecast period is
time_resolution * extrapolate_years, rounded down
"""
if isinstance(date_start_actuals, str) or \
isinstance(date_start_actuals, datetime): # Use dates if available
date_start_actuals = pd.to_datetime(date_start_actuals)
date_end_actuals = pd.to_datetime(date_end_actuals)
weekday_adjustment = date_start_actuals.weekday()
expected_freq = dict_wday_name.get(weekday_adjustment)
if freq is None: # Default frequency
freq = expected_freq
else:
if freq.startswith('W'):
assert expected_freq == freq, \
'Error: with weekly frequency, freq ' \
'parameter must match weekday of date_start_actuals:' \
' {} - {} , {}' \
.format(freq, expected_freq, date_start_actuals)
freq_short = freq[0:1] # Changes e.g. W-MON to W
# freq_units_per_year = 52.0 if freq_short=='W' else 365.0
# Todo: change to dict to support more frequencies
freq_units_per_year = dict_freq_units_per_year.get(freq_short, 365.0)
extrapolate_units = extrapolate_years * freq_units_per_year
offset_input = {dict_dateoffset_input.get(freq_short):
extrapolate_units}
date_end_forecast = date_end_actuals + \
pd.DateOffset(**offset_input)
i_date = pd.date_range(
date_start_actuals,
date_end_forecast,
freq=freq,
name='date')
s_date = pd.Series(i_date)
# Get days passed since date_start, then add x_start_actuals
s_x = (s_date - date_start_actuals).dt.days + x_start_actuals
s_x.index = i_date
else:
# Otherwise, use numeric index
# we extrapolate future samples equal to 100*extrapolate_years
index = pd.Index(
np.arange(
date_start_actuals,
date_end_actuals +
100 *
extrapolate_years))
s_x = pd.Series(
index=index,
data=np.arange(
x_start_actuals,
x_start_actuals + index.size)) + x_start_actuals
if model_requires_scaling(model):
s_x = s_x / scaling_factor
return s_x
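# Sketch of a typical call (argument values are assumptions, not from the source):
#   s_x = get_s_x_extrapolate('2020-01-06', '2020-06-29', freq='W-MON',
#                             extrapolate_years=1.0)
# returns a weekly DateTimeIndex-ed series of day offsets covering the actuals
# range plus roughly 52 extra weekly samples for the forecast horizon.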
# Forecast Selection Functions
def get_aic_c(fit_error, n, n_params):
"""
This function implements the corrected Akaike Information Criterion (AICc)
taking as input a given fit error and data/model degrees of freedom.
We assume that the residuals of the candidate model are distributed
according to independent identical normal distributions with zero mean.
    Hence, we can define the AICc as
.. math::
AICc = AIC + \\frac{2k(k+1)}{n-k-1} =
2k + n \\log\\left(\\frac{E}{n}\\right) + \\frac{2k(k+1)}{n-k-1},
where :math:`k` and :math:`n` denotes the model and data degrees of
freedom respectively, and :math:`E`
denotes the residual error of the fit.
:param fit_error: Residual error of the fit
:type fit_error: float
:param n: Data degrees of freedom
:type n: int
:param n_params: Model degrees of freedom
:type n_params: int
:return: Corrected Akaike Information Criterion (AICc)
:rtype: float
Note:
- see AIC in `Wikipedia article on the AIC
<https://en.wikipedia.org/wiki/Akaike_information_criterion>`_.
"""
# First, deal with corner cases that can blow things up with division by
# zero
if (n <= n_params + 1) or (n == 0):
aux = n - n_params - 1
raise ValueError(
'ERROR: Time series too short for AIC_C: (n = ' +
str(n) +
', n - n_params - 1 = ' +
str(aux) +
')')
elif fit_error == 0.0:
if n_params == 1:
aicc = -float("inf")
else:
# This can lead to suboptimal model selection when we have
# multiple perfect fits - we use a patch instead
# aicc = -float("inf")
fit_error = 10 ** -320
aicc = n * math.log(fit_error / n) + 2 * n_params + \
(2 * n_params * (n_params + 1) / (n - n_params - 1))
else:
# Actual calculation of the AICc
aicc = n * math.log(fit_error / n) + 2 * n_params + \
(2 * n_params * (n_params + 1) / (n - n_params - 1))
return aicc
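# Worked example (numbers are illustrative): with fit_error=10.0, n=100 and
# n_params=3, AICc = 100*log(10/100) + 2*3 + (2*3*4)/(100-3-1)
#                  ~ -230.26 + 6 + 0.25 ~ -224.01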
def get_s_aic_c_best_result_key(s_aic_c):
# Required because aic_c can be -inf, that value is not compatible with
# pd.Series.argmin()
if s_aic_c.empty or s_aic_c.isnull().all():
return None
if (s_aic_c.values == -np.inf).any():
        key_best_result = (s_aic_c == -np.inf).to_numpy().nonzero()[0]
key_best_result = s_aic_c.index[key_best_result.min()]
else:
key_best_result = s_aic_c.argmin()
return key_best_result
def detect_freq(a_date):
if isinstance(a_date, pd.DataFrame):
if 'date' not in a_date.columns:
return None
else:
a_date = a_date.date
s_date = pd.Series(a_date).sort_values().drop_duplicates()
min_date_delta = s_date.diff().min()
if pd.isnull(min_date_delta):
return None
elif min_date_delta == pd.Timedelta(1, unit='h'):
return 'H'
elif min_date_delta == pd.Timedelta(7, unit='D'):
# Weekly seasonality - need to determine day of week
min_date_wday = s_date.min().weekday()
return dict_wday_name.get(min_date_wday, 'W')
elif min_date_delta >= pd.Timedelta(28, unit='d') and \
min_date_delta <= pd.Timedelta(31, unit='d'):
# MS is month start, M is month end. We use MS if all dates match first
# of month
if s_date.dt.day.max() == 1:
return 'MS'
else:
return 'M'
elif min_date_delta >= pd.Timedelta(89, unit='d') and \
min_date_delta <= pd.Timedelta(92, unit='d'):
return 'Q'
elif min_date_delta >= pd.Timedelta(365, unit='d') and \
min_date_delta <= pd.Timedelta(366, unit='d'):
        # YS is year start, Y is year end. We use YS if all dates match the
        # first of January
if s_date.dt.day.max() == 1 and s_date.dt.month.max() == 1:
return 'YS'
else:
return 'Y'
elif min_date_delta >= | pd.Timedelta(23, unit='h') | pandas.Timedelta |
""":func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
import struct
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.computation.common import NameResolutionError
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError
def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None,
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(gbls=global_dict, lcls=local_dict, level=level,
resolvers=resolvers, target=target)
def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys):
"""Make sure that variables in resolvers don't overlap with locals or
globals.
"""
res_locals = list(com.intersection(resolver_keys, local_keys))
if res_locals:
msg = "resolvers and locals overlap on names {0}".format(res_locals)
raise NameResolutionError(msg)
res_globals = list(com.intersection(resolver_keys, global_keys))
if res_globals:
msg = "resolvers and globals overlap on names {0}".format(res_globals)
raise NameResolutionError(msg)
def _replacer(x, pad_size):
"""Replace a number with its padded hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin).replace('0x', '').rjust(pad_size, '0')
def _raw_hex_id(obj, pad_size=2):
"""Return the padded hexadecimal id of ``obj``."""
# interpret as a pointer since that's what really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(_replacer(x, pad_size) for x in packed)
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
added by pandas.
Parameters
----------
gbls : dict or None, optional, default None
lcls : dict or Scope or None, optional, default None
level : int, optional, default 1
resolvers : list-like or None, optional, default None
Attributes
----------
globals : dict
locals : dict
level : int
resolvers : tuple
resolver_keys : frozenset
"""
__slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers',
'resolver_keys', '_resolver', 'level', 'ntemps', 'target')
def __init__(self, gbls=None, lcls=None, level=1, resolvers=None,
target=None):
self.level = level
self.resolvers = tuple(resolvers or [])
self.globals = dict()
self.locals = dict()
self.target = target
self.ntemps = 1 # number of temporary variables in this scope
if isinstance(lcls, Scope):
ld, lcls = lcls, dict()
self.locals.update(ld.locals.copy())
self.globals.update(ld.globals.copy())
self.resolvers += ld.resolvers
if ld.target is not None:
self.target = ld.target
self.update(ld.level)
frame = sys._getframe(level)
try:
self.globals.update(gbls or frame.f_globals)
self.locals.update(lcls or frame.f_locals)
finally:
del frame
# add some useful defaults
self.globals['Timestamp'] = pd.lib.Timestamp
self.globals['datetime'] = datetime
# SUCH a hack
self.globals['True'] = True
self.globals['False'] = False
# function defs
self.globals['list'] = list
self.globals['tuple'] = tuple
res_keys = (list(o.keys()) for o in self.resolvers)
self.resolver_keys = frozenset(reduce(operator.add, res_keys, []))
self._global_resolvers = self.resolvers + (self.locals, self.globals)
self._resolver = None
self.resolver_dict = {}
for o in self.resolvers:
self.resolver_dict.update(dict(o))
def __unicode__(self):
return com.pprint_thing(
            'locals: {0}\nglobals: {1}\nresolvers: '
            '{2}\ntarget: {3}'.format(list(self.locals.keys()),
                                      list(self.globals.keys()),
                                      list(self.resolver_keys),
                                      self.target))
def __getitem__(self, key):
return self.resolve(key, globally=False)
def resolve(self, key, globally=False):
resolvers = self.locals, self.globals
if globally:
resolvers = self._global_resolvers
for resolver in resolvers:
try:
return resolver[key]
except KeyError:
pass
def update(self, level=None):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
# we are always 2 levels below the caller
# plus the caller may be below the env level
        # in which case we need additional levels
sl = 2
if level is not None:
sl += level
# add sl frames to the scope starting with the
        # most distant and overwriting with more current
# makes sure that we can capture variable scope
frame = inspect.currentframe()
try:
frames = []
while sl >= 0:
frame = frame.f_back
sl -= 1
if frame is None:
break
frames.append(frame)
for f in frames[::-1]:
self.locals.update(f.f_locals)
self.globals.update(f.f_globals)
finally:
del frame, frames
def add_tmp(self, value, where='locals'):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
where : basestring, optional, default 'locals', {'locals', 'globals'}
What scope to add the value to.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot add value to non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot add value to object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
name = 'tmp_var_{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,
_raw_hex_id(self))
d[name] = value
# only increment if the variable gets put in the scope
self.ntemps += 1
return name
def remove_tmp(self, name, where='locals'):
d = getattr(self, where, None)
if d is None:
raise AttributeError("Cannot remove value from non-existent scope "
"{0!r}".format(where))
if not isinstance(d, dict):
raise TypeError("Cannot remove value from object of type {0!r}, "
"scope must be a dictionary"
"".format(type(d).__name__))
del d[name]
self.ntemps -= 1
def _rewrite_assign(source):
"""Rewrite the assignment operator for PyTables expression that want to use
``=`` as a substitute for ``==``.
"""
res = []
g = tokenize.generate_tokens(StringIO(source).readline)
for toknum, tokval, _, _, _ in g:
res.append((toknum, '==' if tokval == '=' else tokval))
return tokenize.untokenize(res)
def _replace_booleans(source):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
"""
return source.replace('|', ' or ').replace('&', ' and ')
def _replace_locals(source, local_symbol='@'):
"""Replace local variables with a syntacticall valid name."""
return source.replace(local_symbol, _LOCAL_TAG)
def _preparse(source):
"""Compose assignment and boolean replacement."""
return _replace_booleans(_rewrite_assign(source))
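# Rough illustration (exact whitespace from untokenize may differ):
#   _preparse('a = 1 & b')  ->  'a ==1  and  b'
# i.e. '=' is rewritten to '==' and bitwise '&'/'|' become boolean 'and'/'or'.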
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
issubclass(x, ast.AST),
(getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
"""Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__ for node in all_nodes
if issubclass(node, superclass))
return frozenset(node_names)
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
'DictComp', 'SetComp', 'Repr', 'Lambda',
'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
_arguments_nodes | _keyword_nodes | _alias_nodes |
_expr_context_nodes | _unsupported_expr_nodes) -
_hacked_nodes)
# we're adding a different assignment in some cases to be equality comparison
# and we don't want `stmt` and friends in there, so get only the classes whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
_base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
"""Decorator to disallow certain nodes from parsing. Raises a
NotImplementedError instead.
Returns
-------
disallowed : callable
"""
def disallowed(cls):
cls.unsupported_nodes = ()
for node in nodes:
new_method = _node_not_implemented(node, cls)
name = 'visit_{0}'.format(node)
cls.unsupported_nodes += (name,)
setattr(cls, name, new_method)
return cls
return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
"""Decorator to add default implementation of ops."""
def f(cls):
for op_attr_name, op_class in compat.iteritems(op_classes):
ops = getattr(cls, '{0}_ops'.format(op_attr_name))
ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
for op in ops:
op_node = ops_map[op]
if op_node is not None:
made_op = _op_maker(op_class, op)
setattr(cls, 'visit_{0}'.format(op_node), made_op)
return cls
return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
"""Custom ast walker. Parsers of other engines should subclass this class
if necessary.
Parameters
----------
env : Scope
engine : str
parser : str
preparser : callable
"""
const_type = Constant
term_type = Term
binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
None, 'Pow', 'FloorDiv', 'Mod')
binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
unary_ops = _unary_ops_syms
unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
rewrite_map = {
ast.Eq: ast.In,
ast.NotEq: ast.NotIn,
ast.In: ast.In,
ast.NotIn: ast.NotIn
}
def __init__(self, env, engine, parser, preparser=_preparse):
self.env = env
self.engine = engine
self.parser = parser
self.preparser = preparser
self.assigner = None
def visit(self, node, **kwargs):
if isinstance(node, string_types):
clean = self.preparser(node)
node = ast.fix_missing_locations(ast.parse(clean))
elif not isinstance(node, ast.AST):
raise TypeError("Cannot visit objects of type {0!r}"
"".format(node.__class__.__name__))
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method)
return visitor(node, **kwargs)
def visit_Module(self, node, **kwargs):
if len(node.body) != 1:
raise SyntaxError('only a single expression is allowed')
expr = node.body[0]
return self.visit(expr, **kwargs)
def visit_Expr(self, node, **kwargs):
return self.visit(node.value, **kwargs)
def _rewrite_membership_op(self, node, left, right):
# the kind of the operator (is actually an instance)
op_instance = node.op
op_type = type(op_instance)
# must be two terms and the comparison operator must be ==/!=/in/not in
if is_term(left) and is_term(right) and op_type in self.rewrite_map:
left_list, right_list = map(_is_list, (left, right))
left_str, right_str = map(_is_str, (left, right))
# if there are any strings or lists in the expression
if left_list or right_list or left_str or right_str:
op_instance = self.rewrite_map[op_type]()
# pop the string variable out of locals and replace it with a list
# of one string, kind of a hack
if right_str:
self.env.remove_tmp(right.name)
name = self.env.add_tmp([right.value])
right = self.term_type(name, self.env)
if left_str:
self.env.remove_tmp(left.name)
name = self.env.add_tmp([left.value])
left = self.term_type(name, self.env)
op = self.visit(op_instance)
return op, op_instance, left, right
def _possibly_transform_eq_ne(self, node, left=None, right=None):
if left is None:
left = self.visit(node.left, side='left')
if right is None:
right = self.visit(node.right, side='right')
op, op_class, left, right = self._rewrite_membership_op(node, left,
right)
return op, op_class, left, right
def _possibly_eval(self, binop, eval_in_python):
# eval `in` and `not in` (for now) in "partial" python space
# things that can be evaluated in "eval" space will be turned into
# temporary variables. for example,
# [1,2] in a + 2 * b
# in that case a + 2 * b will be evaluated using numexpr, and the "in"
# call will be evaluated using isin (in python space)
return binop.evaluate(self.env, self.engine, self.parser,
self.term_type, eval_in_python)
def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
eval_in_python=('in', 'not in'),
maybe_eval_in_python=('==', '!=', '<', '>',
'<=', '>=')):
res = op(lhs, rhs)
if self.engine != 'pytables':
if (res.op in _cmp_ops_syms
and getattr(lhs, 'is_datetime', False)
or getattr(rhs, 'is_datetime', False)):
# all date ops must be done in python bc numexpr doesn't work
# well with NaT
return self._possibly_eval(res, self.binary_ops)
if res.op in eval_in_python:
# "in"/"not in" ops are always evaluated in python
return self._possibly_eval(res, eval_in_python)
elif self.engine != 'pytables':
if (getattr(lhs, 'return_type', None) == object
or getattr(rhs, 'return_type', None) == object):
# evaluate "==" and "!=" in python if either of our operands
# has an object return type
return self._possibly_eval(res, eval_in_python +
maybe_eval_in_python)
return res
def visit_BinOp(self, node, **kwargs):
op, op_class, left, right = self._possibly_transform_eq_ne(node)
return self._possibly_evaluate_binop(op, op_class, left, right)
def visit_Div(self, node, **kwargs):
return lambda lhs, rhs: Div(lhs, rhs,
truediv=self.env.locals['truediv'])
def visit_UnaryOp(self, node, **kwargs):
op = self.visit(node.op)
operand = self.visit(node.operand)
return op(operand)
def visit_Name(self, node, **kwargs):
return self.term_type(node.id, self.env, **kwargs)
def visit_NameConstant(self, node, **kwargs):
return self.const_type(node.value, self.env)
def visit_Num(self, node, **kwargs):
return self.const_type(node.n, self.env)
def visit_Str(self, node, **kwargs):
name = self.env.add_tmp(node.s)
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
name = self.env.add_tmp([self.visit(e).value for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
def visit_Index(self, node, **kwargs):
""" df.index[4] """
return self.visit(node.value)
def visit_Subscript(self, node, **kwargs):
value = self.visit(node.value)
slobj = self.visit(node.slice)
result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
parser=self.parser)
try:
# a Term instance
v = value.value[result]
except AttributeError:
# an Op instance
lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
parser=self.parser)
v = lhs[result]
name = self.env.add_tmp(v)
return self.term_type(name, env=self.env)
def visit_Slice(self, node, **kwargs):
""" df.index[slice(4,6)] """
lower = node.lower
if lower is not None:
lower = self.visit(lower).value
upper = node.upper
if upper is not None:
upper = self.visit(upper).value
step = node.step
if step is not None:
step = self.visit(step).value
return slice(lower, upper, step)
def visit_Assign(self, node, **kwargs):
"""
support a single assignment node, like
c = a + b
set the assigner at the top level, must be a Name node which
might or might not exist in the resolvers
"""
if len(node.targets) != 1:
raise SyntaxError('can only assign a single expression')
if not isinstance(node.targets[0], ast.Name):
raise SyntaxError('left hand side of an assignment must be a '
'single name')
if self.env.target is None:
raise ValueError('cannot assign without a target object')
try:
assigner = self.visit(node.targets[0], **kwargs)
except UndefinedVariableError:
assigner = node.targets[0].id
self.assigner = getattr(assigner, 'name', assigner)
if self.assigner is None:
raise SyntaxError('left hand side of an assignment must be a '
'single resolvable name')
return self.visit(node.value, **kwargs)
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx
if isinstance(ctx, ast.Load):
# resolve the value
resolved = self.visit(value).value
try:
v = getattr(resolved, attr)
name = self.env.add_tmp(v)
return self.term_type(name, self.env)
except AttributeError:
# something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def visit_Call(self, node, side=None, **kwargs):
# this can happen with: datetime.datetime
if isinstance(node.func, ast.Attribute):
res = self.visit_Attribute(node.func)
elif not isinstance(node.func, ast.Name):
raise TypeError("Only named functions are supported")
else:
res = self.visit(node.func)
if res is None:
raise ValueError("Invalid function call {0}".format(node.func.id))
if hasattr(res, 'value'):
res = res.value
args = [self.visit(targ).value for targ in node.args]
if node.starargs is not None:
args = args + self.visit(node.starargs).value
keywords = {}
for key in node.keywords:
if not isinstance(key, ast.keyword):
raise ValueError("keyword error in function call "
"'{0}'".format(node.func.id))
keywords[key.arg] = self.visit(key.value).value
if node.kwargs is not None:
keywords.update(self.visit(node.kwargs).value)
return self.const_type(res(*args, **keywords), self.env)
def translate_In(self, op):
return op
def visit_Compare(self, node, **kwargs):
ops = node.ops
comps = node.comparators
# base case: we have something like a CMP b
if len(comps) == 1:
op = self.translate_In(ops[0])
binop = ast.BinOp(op=op, left=node.left, right=comps[0])
return self.visit(binop)
# recursive case: we have a chained comparison, a CMP b CMP c, etc.
left = node.left
values = []
for op, comp in zip(ops, comps):
new_node = self.visit(ast.Compare(comparators=[comp], left=left,
ops=[self.translate_In(op)]))
left = comp
values.append(new_node)
return self.visit(ast.BoolOp(op=ast.And(), values=values))
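    # e.g. the chained comparison ``1 < a <= 3`` is rewritten above into
    # ``(1 < a) and (a <= 3)`` via the BoolOp node built from the pairs.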
def _try_visit_binop(self, bop):
if isinstance(bop, (Op, Term)):
return bop
return self.visit(bop)
def visit_BoolOp(self, node, **kwargs):
def visitor(x, y):
lhs = self._try_visit_binop(x)
rhs = self._try_visit_binop(y)
op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
rhs)
return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
operands = node.values
return reduce(visitor, operands)
_python_not_supported = frozenset(['Dict', 'Call', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
(_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser,
preparser=lambda x: _replace_locals(_replace_booleans(x))):
super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
def __init__(self, env, engine, parser, preparser=lambda x: x):
super(PythonExprVisitor, self).__init__(env, engine, parser,
preparser=preparser)
class Expr(StringMixin):
"""Object encapsulating an expression.
Parameters
----------
expr : str
engine : str, optional, default 'numexpr'
parser : str, optional, default 'pandas'
env : Scope, optional, default None
truediv : bool, optional, default True
level : int, optional, default 2
"""
def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
truediv=True, level=2):
self.expr = expr
self.env = _ensure_scope(level=level, local_dict=env)
self.engine = engine
self.parser = parser
self._visitor = _parsers[parser](self.env, self.engine, self.parser)
self.terms = self.parse()
self.truediv = truediv
@property
def assigner(self):
return getattr(self._visitor, 'assigner', None)
def __call__(self):
self.env.locals['truediv'] = self.truediv
return self.terms(self.env)
def __unicode__(self):
return | com.pprint_thing(self.terms) | pandas.core.common.pprint_thing |
import os
import glob
import psycopg2
import pandas as pd
from sql_queries import *
def process_song_file(cur, filepath):
"""Reads songs log file row by row, selects needed fields and inserts them into song and artist tables.
Parameters:
cur (psycopg2.cursor()): Cursor of the sparkifydb database
filepath (str): Filepath of the file to be analyzed
"""
# open song file
df = | pd.read_json(filepath, lines=True) | pandas.read_json |
#!/usr/bin/env python
import sys
import PySimpleGUI as sg
import pandas as pd
import numpy as np
from icon import icon
def file_picker():
"""shows a file picker for selecting a postQC.tsv file. Returns None on Cancel."""
chooser = sg.Window('Choose file', [
[sg.Text('Filename')],
[sg.Input(), sg.FileBrowse(key='-FILE-', file_types=(('PostQC TSV files', '*.postQC.tsv'),))],
[sg.OK(), sg.Cancel()]], icon=icon)
event, values = chooser.read()
if event in (None, 'Cancel'):
chooser.close()
return None
elif event == 'OK':
chooser.close()
return values['-FILE-']
def postqc_window(uid_groups, avail_groups):
"""main interface. uid_groups is a list of [UID, Group] combinations.
avail_groups is a list of the available groups. returns the main window object."""
table_height = min(25, len(uid_groups))
mgmt_layout = [[sg.B('Add New Group', key='Add')],
[sg.B('Assign Seedling to Group', key='Change'),
sg.B('Exclude Seedling from Analysis', key='Exclude')]]
layout = [
[sg.Table(values=uid_groups, headings=['UID', 'Group'], display_row_numbers=False,
auto_size_columns=True, num_rows=table_height, key="-COMBOS-"),
sg.Table(values=avail_groups, headings=['Available groups', ], display_row_numbers=False,
auto_size_columns=True, num_rows=table_height, key="-GROUPS-",
select_mode=sg.TABLE_SELECT_MODE_BROWSE)],
[sg.Frame('Seedling and Group Management', layout=mgmt_layout)],
[sg.Sizer(h_pixels=120), sg.B('Write PostQC File', key='Write'), sg.B('Exit')]]
return sg.Window('SPIRO Assay Customizer', layout, grab_anywhere=False, icon=icon)
def get_uid_groups(df):
"""gets the unique uids and groups in the specified dataframe. returns a
tuple of uid/group combos (list) and the unique groups (list)."""
uids = | pd.unique(df['UID']) | pandas.unique |
from flask import *
from flask_cors import CORS,cross_origin
import warnings
import os
import dash
import plotly.express as px
from flask import Flask, render_template #this has changed
import plotly.graph_objs as go
import numpy as np
import dash_core_components as dcc
import uuid
from werkzeug.utils import secure_filename
import tensorflow as tf
from keras.preprocessing import image
import pickle
import matplotlib.pyplot as plt
import pandas as pd
import requests
from bs4 import BeautifulSoup
url = "https://www.mygov.in/covid-19/"
path_Model = "./static/model/vgg19_pneumonia.h5"
model = tf.keras.models.load_model(path_Model,compile = True)
# model.load_weights("./static/model/vgg19_pneumonia_weights.hdf5")
path_model_Classifier = "./static/model/neuralNet_Covid_Classifier.sav"
# path_Model_BrainTumor = "./static/model/inception_braintumor.h5"
path_Model_BrainTumor = "./static/model/inception_braintumor1.h5"
path_Model_Covid_CT = "./static/model/vgg_ct.h5"
path_Model_Covid_CXRAY = "./static/model/vgg19_covid_chest.h5"
model_Classifer = pickle.load(open(path_model_Classifier,"rb"))
modelBrainTumor = tf.keras.models.load_model(path_Model_BrainTumor,compile = True)
modelCovid_CT = tf.keras.models.load_model(path_Model_Covid_CT,compile = True)
modelCovid_CXRAY = tf.keras.models.load_model(path_Model_Covid_CXRAY,compile = True)
# model_Classifer.load_weights("./static/model/neuralNet_Covid_Classifier_Weight.hdf5")
warnings.filterwarnings("ignore")
UPLOAD_FOLDER = './static/uploads/'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
examples = os.listdir(os.path.join(app.config['UPLOAD_FOLDER'], 'examples'))
@app.route('/')
def home():
context = web_scrap_and_return_data()
return render_template('index.html', page='home',context = context)
@app.route('/about')
def about():
return render_template('about.html', page='about')
@app.route('/detectB')
def detectB():
return render_template('detectB.html', page='detectB')
@app.route('/detectP')
def detectP():
return render_template('detectP.html', page='detectP')
@app.route('/detectC')
def detectC():
return render_template('detectC1.html', page='detectC')
@app.route('/symptoms')
def symptoms():
return render_template('symptoms.html', page='symptoms')
@app.route('/prevention')
def prevention():
return render_template('prevention.html', page='prevention')
@app.route('/predict', methods=['GET', 'POST'])
def predict1():
if request.method == 'POST':
if 'xrayimage' not in request.files:
return redirect(request.url)
file = request.files['xrayimage']
if file.filename == '':
return redirect(request.url)
if file and check_file_ext(file.filename):
filename = secure_filename(file.filename)
file_ext = os.path.splitext(filename)[1]
filename = str(uuid.uuid4()) + file_ext
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
res,prob = predict(filepath)
return render_template('predict.html', image=filename, result=res, prob = prob)
else:
ex_file = request.args.get('example', default=examples[0], type=str)
if ex_file in examples:
pass
# res = predict(os.path.join(app.config['UPLOAD_FOLDER'], 'examples', ex_file))
# return render_template('predict.html', image=os.path.join('examples', ex_file), result=res)
return redirect('/')
@app.route('/predictCovidTesting', methods=['GET', 'POST'])
def predictCovidTesting():
content = {'state': 'Andhra Pradesh', 'gender': 'male', 'age': '33', 'fever': 'Yes', 'cough': 'Yes', 'fatigue': 'Yes', 'ncongestion': 'Yes', 'pains': 'Yes', 'sbreadth': 'Yes', 'vomiting': 'Yes', 'Diarrhea': 'Yes', 'chills': 'Yes', 'rnose': 'Yes', 'sthroat': 'Yes', 'Headache': 'Yes', 'typeimage': 'XRAY', 'lives_in_affected_area': 1}
return render_template('predictCovidTesting.html', image="filename", result="REEESSIII", prob = 1, image_inp = "CT-SCAN", content = [content])
@app.route('/predictCovid', methods=['GET', 'POST'])
def predictCovid():
if request.method == 'POST':
if 'image' not in request.files:
return redirect(request.url)
file = request.files['image']
if file.filename == '':
return redirect(request.url)
if file and check_file_ext(file.filename):
filename = secure_filename(file.filename)
file_ext = os.path.splitext(filename)[1]
filename = str(uuid.uuid4()) + file_ext
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
content ={
"state" : request.form.get("state"),
"gender" : request.form.get("gender"),
"age" : request.form.get("age"),
"fever" : request.form.get("fever"),
"cough" : request.form.get("cough"),
"fatigue" : request.form.get("fatigue"),
"ncongestion" : request.form.get("ncongestion"),
"pains" : request.form.get("pains"),
"sbreadth" : request.form.get("sbreadth"),
"vomiting" : request.form.get("vomiting"),
"Diarrhea" : request.form.get("Diarrhea"),
"chills" : request.form.get("chills"),
"rnose" : request.form.get("rnose"),
"sthroat" : request.form.get("sthroat"),
"Headache" : request.form.get("Headache"),
"typeimage" : request.form.get("typeimage"),
"lives_in_affected_area" : lives_Cal(request.form.get("state"))}
# print(content)
cc = prepare_C(content)
# print(cc)
result = (model_Classifer.predict_proba([cc]))
# result = round(result[0][1]*100,2)
labels = ['NORMAL','COVID-19']
# return make_response(jsonify(return_content), 200)
res,prob = predictCovidddd(filepath,content['typeimage'])
lab = np.argmax(result[0])
return render_template('predictCovid.html', image=filename, result=res, prob = prob, image_inp = "X-RAY Scan" if content['typeimage'] == "XRAY" else "CT-SCAN",
result2= labels[lab], prob2 = result[0][lab],content = [content])
else:
ex_file = request.args.get('example', default=examples[0], type=str)
if ex_file in examples:
pass
# res = predict(os.path.join(app.config['UPLOAD_FOLDER'], 'examples', ex_file))
# return render_template('predict.html', image=os.path.join('examples', ex_file), result=res)
return redirect('/')
def predictCovidddd(filename,typeimage):
disease_class=['Covid-19','NORMAL']
if typeimage == "XRAY":
custom = modelCovid_CXRAY.predict(prepare(filename))
print("XX")
else:
custom = modelCovid_CT.predict(prepare(filename))
print("CT")
a=custom[0]
# strr = disease_class[1 if a>0.6 else 0]+" with Probability of Pneumonia Being : "+str(a)
# return strr
cla = np.argmax(a)
print(a)
return disease_class[cla], str(a[cla])
# Other functions
def prepare_C(content):
ccc = []
ccc.append(int(content['age']))
ccc.append(1 if content['gender']=='male' else 0)
ccc.append(1 if content['fever']=='Yes' else 0)
ccc.append(int(1 if content['cough']=='Yes' else 0))
ccc.append(int(1 if content['fatigue']=='Yes' else 0))
ccc.append(int(1 if content['pains']=='Yes' else 0))
ccc.append(int(1 if content['ncongestion']=='Yes' else 0))
ccc.append(int(1 if content['sbreadth']=='Yes' else 0))
ccc.append(int(1 if content['rnose']=='Yes' else 0))
ccc.append(int(1 if content['sthroat']=='Yes' else 0))
ccc.append(int(1 if content['Diarrhea']=='Yes' else 0))
ccc.append(int(1 if content['chills']=='Yes' else 0))
ccc.append(int(1 if content['Headache']=='Yes' else 0))
ccc.append(int(1 if content['vomiting']=='Yes' else 0))
ccc.append(int(content['lives_in_affected_area']))
return ccc
def lives_Cal(state):
return 1
def prepare(path):
show_img=image.load_img(path, grayscale=False, target_size=(224, 224))
x = image.img_to_array(show_img)
x = np.expand_dims(x, axis = 0)
x /= 255
plt.imshow(show_img)
plt.show()
return x
def prepareForB(path):
show_img=image.load_img(path, grayscale=True, target_size=(150, 150))
x = image.img_to_array(show_img)
x = np.expand_dims(x, axis = 0)
x /= 255
plt.imshow(show_img)
plt.show()
return x
def my_figure():
india_states = json.load(open("static/states_india.geojson", "r"))
state_id_map = {}
for feature in india_states["features"]:
feature["id"] = feature["properties"]["state_code"]
state_id_map[feature["properties"]["st_nm"]] = feature["id"]
data_scraped = web_scrap_and_return_data()
df = | pd.DataFrame.from_dict(data_scraped['data']) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from logistic_regression import LogisticRegression
def Cal_accuracy(predictions, y):
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y)]
    accuracy = sum(map(int, correct)) / len(correct)
return accuracy
def Call_myLRmodel(X, y):
theta = np.zeros([1,X.shape[1]])
model = LogisticRegression(theta, regularization=1)
result = model.optimize(X, y)
cost = model.cost(result[0], X, y)
print(result, cost)
predictions = model.predict(X)
accuracy = Cal_accuracy(predictions, y)
return cost, accuracy
def Call_SklearnLR(X, y):
model = linear_model.LogisticRegression(penalty='l2', C=1.0)
model.fit(X, y.ravel())
score = model.score(X, y.ravel())
predictions = model.predict(X)
accuracy = Cal_accuracy(predictions, y)
return score, accuracy
if __name__ == '__main__':
path = 'ex2data2.txt'
data = pd.read_csv(path, header=None, names=['Test 1', 'Test 2', 'Accepted'])
print(data.head())
'''
# base on the data to choose the model fo Logistic Regression
positive = data[data['Accepted'].isin([1])]
negative = data[data['Accepted'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['Test 1'], positive['Test 2'], s=50, c='b', marker='o', label='Accepted')
ax.scatter(negative['Test 1'], negative['Test 2'], s=50, c='r', marker='x', label='Rejected')
ax.legend()
ax.set_xlabel('Test 1 Score')
ax.set_ylabel('Test 2 Score')
plt.show()
'''
# According to the original data, a linear decision boundary cannot be found, so a polynomial is introduced.
# Constructing polynomial features from raw data
degree = 5
x1 = data['Test 1']
x2 = data['Test 2']
data.insert(3, 'Ones', 1)
for i in range(1, degree):
for j in range(0, i):
data['F' + str(i) + str(j)] = np.power(x1, i-j) * np.power(x2, j)
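    # The loop above adds terms x1**(i-j) * x2**j named 'F<i><j>', e.g. with
    # degree=5: F10 = x1, F20 = x1**2, F21 = x1*x2, ..., F43 = x1*x2**3.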
data.drop('Test 1', axis=1, inplace=True)
data.drop('Test 2', axis=1, inplace=True)
print(data.head())
# initial training data
# set X and y (remember from above that we moved the label to column 0)
cols = data.shape[1]
X = data.iloc[:,1:]
y = data.iloc[:,0:1]
# convert to numpy arrays and initalize the parameter array theta
X = np.array(X.values)
y = np.array(y.values)
score_1, accuracy_1 = Call_myLRmodel(X, y)
score_2, accuracy_2 = Call_SklearnLR(X, y)
dict = [{'Score':score_1, 'Accuracy':accuracy_1},
{'Score':score_2, 'Accuracy':accuracy_2}]
df = | pd.DataFrame(dict) | pandas.DataFrame |
import pandas as pd
import numpy as np
import re as re
from base import Feature, get_arguments, generate_features
Feature.dir = 'features'
# """sample usage
# """
# class Pclass(Feature):
# def create_features(self):
# self.train['Pclass'] = train['Pclass']
# self.test['Pclass'] = test['Pclass']
class Year(Feature):
def create_features(self):
self.train["year"] = pd.to_datetime(train["publishedAt"]).dt.year
self.test["year"] = pd.to_datetime(test["publishedAt"]).dt.year
class Month(Feature):
def create_features(self):
self.train["month"] = pd.to_datetime(train["publishedAt"]).dt.month
self.test["month"] = | pd.to_datetime(test["publishedAt"]) | pandas.to_datetime |
# -----------------------------------------------------------------------------
'''A Feature Module of classes and functions related to stress distributions.'''
# Case() : A collection of LaminateModel objects
# Cases() : A collection of Cases
# flake8 distributions.py --ignore E265,E501,N802,N806
import os
import importlib
import logging
import tempfile
import traceback
import warnings
import collections as ct
import itertools as it
import pandas as pd
import matplotlib as mpl
mpl.use('Agg') # required to prevent DISPLAY error; must be before pyplot (REF 050)
import matplotlib.pyplot as plt
import lamana as la
from lamana.input_ import BaseDefaults
from lamana.utils import tools as ut
from lamana.lt_exceptions import ModelError
bdft = BaseDefaults()
# =============================================================================
# FEATUREINPUT ----------------------------------------------------------------
# =============================================================================
# Builds FeatureInput objects & makes calls for building LaminateModel objects.
class Case(object):
'''Build a Case object that handles User Input parameters.
Attributes
----------
materials
{middle, inner, outer, total}
total_{middle, inner, inner_i, outer}
snapshots
frames
LMs
size
load_params : dict; default None
A dict of common loading parameters, sample and support radii, etc.
mat_props : dict; default None
A dict of materials and properties, i.e. elastic modulus and Poisson's ratio.
parameters : Series
Converted load_params to pandas object; used for quick display.
properties : DataFrame
Converted mat_props to pandas object; used for quick display.
Geometries : list of Geometry objects
A container for multiple Geometry objects.
model : str
Specified custom, laminate theory model.
LaminateModels : list of DataFrames
Each DataFrame represents a laminate stack containing calculations
from the applied laminate theory model.
p : int
Datapoints per lamina. Although available in FeatureInput, this
attribute is mainly is used for quick access in string representations.
Methods
-------
apply(geo_strings=None, model='Wilson_LT', unique=False)
Return `LaminateModel` and `FeatureInput` objects by iterating geometry
strings. Accept user geometries and selected model.
plot(**kwargs)
Return matplotlib plots given laminate DataFrames.
to_csv(**kwargs)
Write all LaminateModels and FeatureInputs to separate files.
to_xlsx(**kwargs)
Write all LaminateModels and FeatureInputs to one file.
Raises
------
TypeError
If load_params or mat_props is None; needs a dict and nested dict
respectively.
Notes
-----
See the "Getting Started" documentation for a sample loading configuration
diagram.
'''
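    # Rough usage sketch (the geometry string format is an assumption; the model
    # name is the documented default):
    #   >>> case = Case(load_params, mat_props)
    #   >>> case.apply(['400-200-800'], model='Wilson_LT')
    #   >>> case.plot()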
# Automated Parameters
'''Rename args to load_params and mat_props.'''
# TODO: remove materials kwarg (?)
def __init__(self, load_params=None, mat_props=None, materials=None):
# Default Parameters
if load_params is not None:
self.load_params = load_params
else:
raise TypeError('Expected a dict of loading parameter values.')
# Material Properties and Materials Order List
if mat_props is not None:
self.mat_props = bdft._convert_material_parameters(mat_props)
else:
raise TypeError('Expected a nested dict of material properties.')
self._materials = bdft.get_materials(self.mat_props)
# Display pandas Views
self.parameters = | pd.Series(self.load_params) | pandas.Series |
import time
import datetime
start_time = time.time()
date = str(datetime.datetime.now().strftime(format='%m%d'))
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
# from sklearn import pipeline, model_selection
from sklearn import pipeline, grid_search
# from sklearn.feature_extraction import DictVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import TruncatedSVD
# from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import mean_squared_error, make_scorer
from sklearn import decomposition
# from nltk.metrics import edit_distance
from nltk.stem.porter import *
from nltk.stem.snowball import SnowballStemmer
#stemmer = PorterStemmer()
# from nltk.stem.snowball import SnowballStemmer #0.003 improvement but takes twice as long as PorterStemmer
stemmer = SnowballStemmer('english')
import re
# import enchant
import random
random.seed(2016)
hd_train = pd.read_csv('input/train.csv', encoding="ISO-8859-1")#[:100] #update here
hd_test = pd.read_csv('input/test.csv', encoding="ISO-8859-1")#[:100] #update here
hd_pro_desc = pd.read_csv('input/product_descriptions.csv')#[:100] #update here
hd_attr = pd.read_csv('input/attributes.csv')
hd_brand = hd_attr[hd_attr.name == "MFG Brand Name"][["product_uid", "value"]].rename(columns={"value": "brand"})
num_train = hd_train.shape[0]
all_details = pd.concat((hd_train, hd_test), axis=0, ignore_index=True)
all_details = | pd.merge(all_details, hd_pro_desc, how='left', on='product_uid') | pandas.merge |
from typing import List
from bs4 import BeautifulSoup
from pandas.core.frame import DataFrame
import requests
import pandas as pd
import numpy as np
import json
# Scraping Target api to create a database
stores = np.arange(0000, 4000, 1).tolist()
stores = [str(store).zfill(4) for store in stores]
# stores = ['2185', '0874'] #list for isolated store numbers
def get_store(store_keys: List):
ef = pd.DataFrame()
for store in store_keys:
ef=ef
print(store) # Used as a status to monitor where code is at while running
keys = f"https://redsky.target.com/v3/stores/location/{store}?key=8df66ea1e1fc070a6ea99e942431c9cd67a80f02"
result = requests.get(keys)
if result.status_code == 404: continue # Handling 404 pages
content = result.text
soup = BeautifulSoup(content, 'lxml',)
data_json = soup.find('p').getText()
nested_data = json.loads(data_json)
# Turning nested data into a dot notation
def nested(nested_data, prefix='Object'):
new_json=[]
def dotJson(nested_json, prefix="Object"):
if isinstance(nested_json, dict):
for kwargs, json in nested_json.items():
p = "{}.{}".format(prefix, kwargs)
dotJson(json, p)
elif isinstance(nested_json, list):
for kwargs, json in enumerate(nested_json):
kwargs = ''
p = "{}{}".format(prefix, kwargs)
dotJson(json, p)
else:
new_json.append(['{}'.format(prefix), nested_json])
return new_json
dotJson(nested_data, prefix)
return new_json
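        # Rough illustration of the flattening above (input is assumed): passing
        # {'address': {'city': 'Minneapolis'}} with prefix 'Store' yields
        # [['Store.address.city', 'Minneapolis']].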
dot_data = nested(nested_data, prefix='Store')
# Creating the dataframe from the scraped data
df = | pd.DataFrame(dot_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Automated test suites with unittest
run "python -m unittest -v test" in the module directory to run the tests
The clock decorator in utils will measure the run time of the test
"""
#########################################################
# Import Packages and helpers
#########################################################
import unittest
# internal helpers
from decam.utils import *
from decam.modeling_helpers import DataCleaner
import pandas as pd
import numpy as np
flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x, list) else [x]
cserie = lambda serie: list(serie[serie].index)
#flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x,list) else [x]
#########################################################
# Writing the tests
#########################################################
class TestDataCleaner(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" creating test data set for the test module """
cls._test_dc = DataCleaner(data = create_test_df())
@clock
def test_cserie(self):
char_var = cserie(self._test_dc.data.dtypes == "object")
self.assertIsInstance(char_var,list)
self.assertIn('character_variable',char_var)
@clock
def test_removena_numpy(self):
test_array = np.array([np.nan,1,2,np.nan])
self.assertTrue((removena_numpy(test_array) == np.array([1,2])).all())
@clock
def test_sample_df(self):
self.assertEqual(len(self._test_dc.sample_df(pct=0.061)),
0.061 * float(self._test_dc.data.shape[0]))
@clock
def test_nrow(self):
self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0])
@clock
def test_col(self):
self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1])
@clock
def test_nacolcount_capture_na(self):
nacolcount = self._test_dc.nacolcount()
self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0)
self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 0.7)
@clock
def test_nacolcount_is_type_dataframe(self):
self.assertIsInstance(self._test_dc.nacolcount(), pd.core.frame.DataFrame)
@clock
def test_narowcount_capture_na(self):
narowcount = self._test_dc.narowcount()
self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow)
@clock
def test_narowcount_is_type_dataframe(self):
narowcount = self._test_dc.narowcount()
self.assertIsInstance(narowcount, pd.core.frame.DataFrame)
@clock
def test_manymissing_capture(self):
manymissing = self._test_dc.manymissing(0.7)
self.assertIsInstance(manymissing,list)
self.assertIn('many_missing_70', manymissing)
self.assertIn('na_col', manymissing)
@clock
def test_constant_col_capture(self):
constantcol = self._test_dc.constantcol()
self.assertIsInstance(constantcol,list)
self.assertIn('constant_col', constantcol)
self.assertIn('constant_col_num', constantcol)
self.assertIn('na_col', constantcol)
@clock
def test_count_unique(self):
count_unique = self._test_dc.count_unique()
self.assertIsInstance(count_unique,pd.Series)
self.assertEqual(count_unique.id, 1000)
self.assertEqual(count_unique.constant_col, 1)
self.assertEqual(count_unique.character_factor, 7)
@clock
def test_dfchar_check_col(self):
dfchar = self._test_dc._dfchar
self.assertIsInstance(dfchar, list)
self.assertNotIn('num_variable', dfchar)
self.assertIn('character_factor', dfchar)
self.assertIn('character_variable', dfchar)
self.assertNotIn('many_missing_70', dfchar)
@clock
def test_dfnum_check_col(self):
dfnum = self._test_dc._dfnum
self.assertIsInstance(dfnum, list)
self.assertIn('num_variable', dfnum)
self.assertNotIn('character_factor', dfnum)
self.assertNotIn('character_variable', dfnum)
self.assertIn('many_missing_70', dfnum)
@clock
def test_factors_check_col(self):
factors = self._test_dc.factors()
self.assertIsInstance(factors, list)
self.assertNotIn('num_factor', factors)
self.assertNotIn('character_variable', factors)
self.assertIn('character_factor', factors)
@clock
def test_detectkey_check_col(self):
detectkey = self._test_dc.detectkey()
self.assertIsInstance(detectkey,list)
self.assertIn('id', detectkey)
self.assertIn('member_id', detectkey)
@clock
def test_detectkey_check_col_dropna(self):
detectkeyna = self._test_dc.detectkey(dropna=True)
self.assertIn('id_na', detectkeyna)
self.assertIn('id', detectkeyna)
self.assertIn('member_id', detectkeyna)
@clock
def test_findupcol_check(self):
findupcol = self._test_dc.findupcol()
self.assertIn(['id', 'duplicated_column'], findupcol)
self.assertNotIn('member_id', flatten_list(findupcol))
@clock
def test_clean_df(self):
basic_cleaning = self._test_dc.basic_cleaning(drop_col='duplicated_column').columns
self.assertTrue(all([e not in basic_cleaning for e in ['constant_col',
'na_col', 'duplicated_column']]))
self.assertIn('id', basic_cleaning)
@clock
    def test_count_unique_detailed(self):
count_unique = self._test_dc.count_unique()
self.assertIsInstance(count_unique, pd.Series)
self.assertEqual(count_unique.id, len(self._test_dc.data.id))
self.assertEqual(count_unique.constant_col, 1)
self.assertEqual(count_unique.num_factor, len(pd.unique(self._test_dc.data.num_factor)))
@clock
def test_structure(self):
structure = self._test_dc.structure()
self.assertIsInstance(structure, pd.DataFrame)
self.assertEqual(len(self._test_dc.data), structure.loc['na_col', 'nb_missing'])
self.assertEqual(len(self._test_dc.data), structure.loc['id', 'nb_unique_values'])
self.assertTrue(structure.loc['id', 'is_key'])
@clock
def test_nearzerovar(self):
nearzerovar = self._test_dc.nearzerovar(save_metrics=True)
self.assertIsInstance(nearzerovar, pd.DataFrame)
self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv))
self.assertIn('constant_col', cserie(nearzerovar.nzv))
self.assertIn('na_col', cserie(nearzerovar.nzv))
@clock
def test_fillna_serie(self):
test_char_variable = self._test_dc.fillna_serie(self._test_dc.data.character_variable_fillna)
test_num_variable = self._test_dc.fillna_serie(self._test_dc.data.numeric_variable_fillna)
        self.assertTrue(test_char_variable.notnull().all())
        self.assertTrue(test_num_variable.notnull().all())
self.assertTrue(( | pd.Series(['A']*300 + ['B']*200 + ['C']*200 +['A']*300) | pandas.Series |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------------
#Framework:
#1. In this framework, only set parameters then train model for you.
#2. Automatically recommend best models for you. Give you insights that what model
# is fitting your problem best.
#3. Give you predictions if you input dataset to be predicted.
#Version: 1.0.0
# Hard-coded version. Runs the basic statistical-modeling workflow, including
# one-hot preprocessing, equal-width discretization, CART, GBDT, precision/recall
# thresholds, PR curve with averaged precision, ROC with AUC
#Dependencies: pandas, numpy, sklearn, matplotlib
#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
#User Input:
#1. Dataset: the full dataset with column names.
# 1.1. Data Source: currently read from CSV files; json, xml, mysql, etc. still need to be
#      supported for mobile-phone or server use.
#2. Features: a list of str, specifying the columns in the dataset
# 2.1. For better execution, feature data types will need to be specified in the next version.
#3. That's ALL!!! No need to set up the models, the parameters, or the evaluation metrics.
#   Forget them!
#-----------------------------------------------------------------------------------------
import numpy as np
import pandas as pd
#from sklearn import linear_model
import matplotlib.pyplot as plt
import pipline.fitting as ft
import pipline.preprocessing as pp
from datetime import datetime
from sklearn.externals import joblib
class WellDefinedDataSets(object) :
def __init__(self, X_train, Y_train, X_test, Y_test) :
self.X_train = X_train
self.Y_train = Y_train
self.X_test = X_test
self.Y_test = Y_test
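# Illustrative sketch (not part of the original framework): one way a raw dataframe
# could be split and wrapped into WellDefinedDataSets. The label column name and the
# 0.2 test fraction are hypothetical placeholders.
def _example_build_datasets(df, label_col="label", test_size=0.2):
    """Hedged example: wrap a simple random split into a WellDefinedDataSets object."""
    from sklearn.model_selection import train_test_split
    X = df.drop(columns=[label_col])
    Y = df[label_col]
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size)
    return WellDefinedDataSets(X_train, Y_train, X_test, Y_test)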
#params: a dict of all parameters
# minimal setting of params:
# 1. data desc: cat_features, con_features, label
# 2. model setup: n_disc,
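# Hedged example of a params dict. The key names follow the comment above plus the
# "frac" key that EasiML_Modeling reads below; the column names and values themselves
# are made-up placeholders, not part of the original project.
EXAMPLE_PARAMS = {
    "cat_features": ["gender", "city"],   # hypothetical categorical feature columns
    "con_features": ["age", "income"],    # hypothetical continuous feature columns
    "label": "churn",                     # hypothetical target column
    "n_disc": 5,                          # number of equal-width bins for discretization
    "frac": 0.8,                          # training fraction, read by EasiML_Modeling
}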
def EasiML_Modeling(data_ori, params) :
frac = params["frac"]
data_0 = data_ori
tic = datetime.now()
X_norm, Y, onehot_names = pp.BuildFeatures(data_0, params)
toc = datetime.now()
elapsed_time = toc-tic
print("preprocessing elapsed time: " + str(elapsed_time))
dataset = | pd.concat([X_norm, Y], axis=1) | pandas.concat |
import pandas as pd
import numpy as np
import re
import marcformat
class MarcExtractor(object):
tag_marc_file = 'MARC_FILE'
tag_filter_columns = 'FILTER_COLUMNS'
tag_marc_output_file = 'MARC_OUTPUT_FILE'
marcFile = ''
marcOutFile = ''
filteredColumns = []
df = | pd.DataFrame() | pandas.DataFrame |
"""
GLM fitting utilities based on NeuroGLM by <NAME>, <NAME>:
https://github.com/pillowlab/neuroGLM
<NAME>
International Brain Lab, 2020
"""
from warnings import warn, catch_warnings
import numpy as np
from numpy.linalg.linalg import LinAlgError
import pandas as pd
from brainbox.processing import bincount2D
from sklearn.linear_model import PoissonRegressor
import scipy.sparse as sp
import numba as nb
from numpy.matlib import repmat
from scipy.optimize import minimize
from scipy.special import xlogy
from tqdm import tqdm
import torch
from brainbox.modeling.poissonGLM import PoissonGLM
class NeuralGLM:
"""
Generalized Linear Model which seeks to describe spiking activity as the output of a poisson
process. Uses sklearn's GLM methods under the hood while providing useful routines for dealing
with neural data
"""
def __init__(self, trialsdf, spk_times, spk_clu, vartypes,
train=0.8, blocktrain=False, binwidth=0.02, mintrials=100, subset=False):
"""
Construct GLM object using information about all trials, and the relevant spike times.
Only ingests data, and further object methods must be called to describe kernels, gain
terms, etc. as components of the model.
Parameters
----------
trialsdf: pandas.DataFrame
DataFrame of trials in which each row contains all desired covariates of the model.
e.g. contrast, stimulus type, etc. Not all columns will necessarily be fit.
If a continuous covariate (e.g. wheel position, pupil diameter) is included, each entry
of the column must be a nSamples x 2 array with samples in the first column and
timestamps (relative to trial start) in the second position.
*Must have \'trial_start\' and \'trial_end\' parameters which are times, in seconds.*
spk_times: numpy.array of floats
1-D array of times at which spiking events were detected, in seconds.
spk_clu: numpy.array of integers
1-D array of same shape as spk_times, with integer cluster IDs identifying which
cluster a spike time belonged to.
vartypes: dict
Dict with column names in trialsdf as keys, values are the type of covariate the column
contains. e.g. {'stimOn_times': 'timing', 'wheel', 'continuous', 'correct': 'value'}
Valid values are:
'timing' : A timestamp relative to trial start (e.g. stimulus onset)
'continuous' : A continuous covariate sampled throughout the trial (e.g. eye pos)
'value' : A single value for the given trial (e.g. contrast or difficulty)
train: float
Float in (0, 1] indicating proportion of data to use for training GLM vs testing
(using the NeuralGLM.score method). Trials to keep will be randomly sampled.
binwidth: float
Width, in seconds, of the bins which will be used to count spikes. Defaults to 20ms.
mintrials: int
Minimum number of trials in which neurons fired a spike in order to be fit. Defaults
to 100 trials.
subset: bool
Whether or not to perform model subsetting, in which the model is built iteratively
from only the mean rate, up. This allows comparison of D^2 scores for sub-models which
incorporate only some parameters, to see which regressors actually improve
explainability. Default to False.
Returns
-------
glm: object
GLM object with methods for adding regressors and fitting
"""
# Data checks #
if not all([name in vartypes for name in trialsdf.columns]):
raise KeyError("Some columns were not described in vartypes")
if not all([value in ('timing', 'continuous', 'value') for value in vartypes.values()]):
raise ValueError("Invalid values were passed in vartypes")
if not len(spk_times) == len(spk_clu):
raise IndexError("Spike times and cluster IDs are not same length")
if not isinstance(train, float) and not train == 1:
raise TypeError('train must be a float between 0 and 1')
if not ((train > 0) & (train <= 1)):
raise ValueError('train must be between 0 and 1')
# Filter out cells which don't meet the criteria for minimum spiking, while doing trial
# assignment
self.vartypes = vartypes
self.vartypes['duration'] = 'value'
trialsdf = trialsdf.copy() # Make sure we don't modify the original dataframe
clu_ids = np.unique(spk_clu)
trbounds = trialsdf[['trial_start', 'trial_end']] # Get the start/end of trials
# Initialize a Cells x Trials bool array to easily see how many trials a clu spiked
trialspiking = np.zeros((trialsdf.index.max() + 1, clu_ids.max() + 1), dtype=bool)
# Empty trial duration value to use later
trialsdf['duration'] = np.nan
# Iterate through each trial, and store the relevant spikes for that trial into a dict
# Along with the cluster labels. This makes binning spikes and accessing spikes easier.
spks = {}
clu = {}
st_endlast = 0
timingvars = [col for col in trialsdf.columns if vartypes[col] == 'timing']
for i, (start, end) in trbounds.iterrows():
if any(np.isnan((start, end))):
warn(f"NaN values found in trial start or end at trial number {i}. "
"Discarding trial.")
trialsdf.drop(i, inplace=True)
continue
st_startind = np.searchsorted(spk_times[st_endlast:], start) + st_endlast
st_endind = np.searchsorted(spk_times[st_endlast:], end, side='right') + st_endlast
st_endlast = st_endind
trial_clu = np.unique(spk_clu[st_startind:st_endind])
trialspiking[i, trial_clu] = True
spks[i] = spk_times[st_startind:st_endind] - start
clu[i] = spk_clu[st_startind:st_endind]
for col in timingvars:
trialsdf.at[i, col] = np.round(trialsdf.at[i, col] - start, decimals=5)
trialsdf.at[i, 'duration'] = end - start
# Break the data into test and train sections for cross-validation
if train == 1:
print('Training fraction set to 1. Training on all data.')
traininds = trialsdf.index
testinds = trialsdf.index
elif blocktrain:
trainlen = int(np.floor(len(trialsdf) * train))
traininds = trialsdf.index[:trainlen]
testinds = trialsdf.index[trainlen:]
else:
trainlen = int(np.floor(len(trialsdf) * train))
traininds = sorted(np.random.choice(trialsdf.index, trainlen, replace=False))
testinds = trialsdf.index[~trialsdf.index.isin(traininds)]
# Set model parameters to begin with
self.spikes = spks
self.clu = clu
self.clu_ids = np.argwhere(np.sum(trialspiking, axis=0) > mintrials)
self.binwidth = binwidth
self.covar = {}
self.trialsdf = trialsdf
self.traininds = traininds
self.testinds = testinds
self.compiled = False
self.subset = subset
if len(self.clu_ids) == 0:
            raise UserWarning('No neuron fired a spike in the minimum number of trials.')
# Bin spikes
self._bin_spike_trains()
return
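    # Illustrative construction sketch (comments only; the column names and vartypes below are
    # made up). Note that every column of trialsdf -- including 'trial_start' and 'trial_end' --
    # must appear in vartypes, per the checks above.
    #   vartypes = {'trial_start': 'value', 'trial_end': 'value', 'stimOn_times': 'timing',
    #               'contrast': 'value', 'wheel': 'continuous'}
    #   glm = NeuralGLM(trialsdf, spk_times, spk_clu, vartypes,
    #                   train=0.8, binwidth=0.02, mintrials=100)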
def _bin_spike_trains(self):
"""
Bins spike times passed to class at instantiation. Will not bin spike trains which did
not meet the criteria for minimum number of spiking trials. Must be run before the
NeuralGLM.fit() method is called.
"""
spkarrs = []
arrdiffs = []
for i in self.trialsdf.index:
duration = self.trialsdf.loc[i, 'duration']
durmod = duration % self.binwidth
if durmod > (self.binwidth / 2):
duration = duration - (self.binwidth / 2)
if len(self.spikes[i]) == 0:
arr = np.zeros((self.binf(duration), len(self.clu_ids)))
spkarrs.append(arr)
continue
spks = self.spikes[i]
clu = self.clu[i]
arr = bincount2D(spks, clu,
xbin=self.binwidth, ybin=self.clu_ids, xlim=[0, duration])[0]
arrdiffs.append(arr.shape[1] - self.binf(duration))
spkarrs.append(arr.T)
y = np.vstack(spkarrs)
if hasattr(self, 'dm'):
assert y.shape[0] == self.dm.shape[0], "Oh shit. Indexing error."
self.binnedspikes = y
return
def add_covariate_timing(self, covlabel, eventname, bases,
offset=0, deltaval=None, cond=None, desc=''):
"""
Convenience wrapper for adding timing event regressors to the GLM. Automatically generates
a one-hot vector for each trial as the regressor and adds the appropriate data structure
to the model.
Parameters
----------
covlabel : str
Label which the covariate will use. Can be accessed via dot syntax of the instance
usually.
eventname : str
Label of the column in trialsdf which has the event timing for each trial.
bases : numpy.array
nTB x nB array, i.e. number of time bins for the bases functions by number of bases.
Each column in the array is used together to describe the response of a unit to that
timing event.
offset : float, seconds
            Offset of basis functions relative to the timing event. Negative values shift the
            basis functions so that they begin before the timing event.
deltaval : None, str, or pandas series, optional
Values of the kronecker delta function peak used to encode the event. If a string, the
column in trialsdf with that label will be used. If a pandas series with indexes
            matching trialsdf, corresponding elements of the series will be the delta function val.
If None (default) height is 1.
cond : None, list, or fun, optional
Condition which to apply this covariate. Can either be a list of trial indices, or a
function which takes in rows of the trialsdf and returns booleans.
desc : str, optional
Additional information about the covariate, if desired. by default ''
"""
if covlabel in self.covar:
raise AttributeError(f'Covariate {covlabel} already exists in model.')
if self.compiled:
warn('Design matrix was already compiled once. Be sure to compile again if adding'
' additional covariates.')
if deltaval is None:
gainmod = False
elif isinstance(deltaval, pd.Series):
gainmod = True
elif isinstance(deltaval, str) and deltaval in self.trialsdf.columns:
gainmod = True
deltaval = self.trialsdf[deltaval]
else:
raise TypeError(f'deltaval must be None or pandas series. {type(deltaval)} '
'was passed instead.')
if eventname not in self.vartypes:
raise ValueError('Event name specified not found in trialsdf')
elif self.vartypes[eventname] != 'timing':
raise TypeError(f'Column {eventname} in trialsdf is not registered as a timing')
vecsizes = self.trialsdf['duration'].apply(self.binf)
stiminds = self.trialsdf[eventname].apply(self.binf)
stimvecs = []
for i in self.trialsdf.index:
vec = np.zeros(vecsizes[i])
if gainmod:
vec[stiminds[i]] = deltaval[i]
else:
vec[stiminds[i]] = 1
stimvecs.append(vec.reshape(-1, 1))
regressor = | pd.Series(stimvecs, index=self.trialsdf.index) | pandas.Series |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import gzip
import os
import pathlib
import shutil
import tempfile
import unittest
import pandas as pd
from q2_cutadapt._demux import (_build_demux_command, _rename_files,
_write_barcode_fasta,
_write_empty_fastq_to_mux_barcode_in_seq_fmt)
from q2_types.multiplexed_sequences import (
MultiplexedSingleEndBarcodeInSequenceDirFmt,
MultiplexedPairedEndBarcodeInSequenceDirFmt)
from q2_types.per_sample_sequences import (
SingleLanePerSampleSingleEndFastqDirFmt,
SingleLanePerSamplePairedEndFastqDirFmt,
FastqGzFormat)
from qiime2 import Artifact, CategoricalMetadataColumn
from qiime2.util import redirected_stdio
from qiime2.plugin.testing import TestPluginBase
class TestDemuxSingle(TestPluginBase):
package = 'q2_cutadapt.tests'
def assert_demux_results(self, exp_samples_and_barcodes, obs_demuxed_art):
obs_demuxed = obs_demuxed_art.view(
SingleLanePerSampleSingleEndFastqDirFmt)
obs_demuxed_seqs = obs_demuxed.sequences.iter_views(FastqGzFormat)
zipped = zip(exp_samples_and_barcodes.iteritems(), obs_demuxed_seqs)
for (sample_id, barcode), (filename, _) in zipped:
filename = str(filename)
self.assertTrue(sample_id in filename)
self.assertTrue(barcode in filename)
def assert_untrimmed_results(self, exp, obs_untrimmed_art):
obs_untrimmed = obs_untrimmed_art.view(
MultiplexedSingleEndBarcodeInSequenceDirFmt)
obs_untrimmed = obs_untrimmed.file.view(FastqGzFormat)
obs_untrimmed = gzip.decompress(obs_untrimmed.path.read_bytes())
self.assertEqual(exp, obs_untrimmed)
def setUp(self):
super().setUp()
self.demux_single_fn = self.plugin.methods['demux_single']
muxed_sequences_fp = self.get_data_path('forward.fastq.gz')
self.muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
def test_typical(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_all_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_none_matched(self):
metadata = CategoricalMetadataColumn(
pd.Series(['TTTT'], name='Barcode',
index=pd.Index(['sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
with self.assertRaisesRegex(ValueError, 'demultiplexed'):
self.demux_single_fn(self.muxed_sequences, metadata)
def test_error_tolerance_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# sample_a is dropped because of a substitution error (AAAA vs AAAG)
exp_samples_and_barcodes = pd.Series(['CCCC'], index=['sample_b'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
self.assert_untrimmed_results(b'@id1\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id3\nAAAAACGTACGT\n+\nzzzzzzzzzzzz\n'
b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_error_tolerance_high_enough_to_prevent_filtering(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAG', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
error_rate=0.25)
# This test should yield the same results as test_typical, above
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_extra_barcode_in_metadata(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG', 'TTTT'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c',
'sample_d'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
# TTTT/sample_d shouldn't be in the demuxed results, because there
# were no reads with that barcode present
exp_samples_and_barcodes = pd.Series(['AAAA', 'CCCC', 'GGGG'],
index=['sample_a', 'sample_b',
'sample_c'])
self.assert_demux_results(exp_samples_and_barcodes, obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_variable_length_barcodes(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAAA', 'CCCCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
muxed_sequences_fp = self.get_data_path('variable_length.fastq.gz')
muxed_sequences = Artifact.import_data(
'MultiplexedSingleEndBarcodeInSequence', muxed_sequences_fp)
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(muxed_sequences, metadata)
# This test should yield the same results as test_typical, above, just
# with variable length barcodes
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
batch_size=1)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
self.assert_untrimmed_results(b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
obs_untrimmed_art)
def test_invalid_batch_size(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with self.assertRaisesRegex(ValueError, '5.*cannot be greater.*2'):
self.demux_single_fn(self.muxed_sequences, metadata, batch_size=5)
def test_batch_size_odd_number_of_samples(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC', 'GGGG'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata,
batch_size=2)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
# obs_untrimmed should be empty, since everything matched
self.assert_untrimmed_results(b'', obs_untrimmed_art)
def test_min_length(self):
metadata = CategoricalMetadataColumn(
# The third barcode is meant to completely remove the only GGGG
# coded sequence
pd.Series(['AAAA', 'CCCC', 'GGGGACGTACGT'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b', 'sample_c'],
name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_single_fn(self.muxed_sequences, metadata)
obs = obs_demuxed_art.view(SingleLanePerSampleSingleEndFastqDirFmt)
(obs_f1, _), (obs_f2, _) = obs.sequences.iter_views(FastqGzFormat)
self.assertEqual('sample_a_AAAA_L001_R1_001.fastq.gz', str(obs_f1))
self.assertEqual('sample_b_CCCC_L001_R1_001.fastq.gz', str(obs_f2))
class TestDemuxPaired(TestPluginBase):
package = 'q2_cutadapt.tests'
def assert_demux_results(self, exp_samples_and_barcodes, obs_demuxed_art):
obs_demuxed = obs_demuxed_art.view(
SingleLanePerSamplePairedEndFastqDirFmt)
obs_demuxed_seqs = obs_demuxed.sequences.iter_views(FastqGzFormat)
# Since we are working with fwd/rev reads, duplicate each list elem
exp = [x for x in exp_samples_and_barcodes.iteritems() for _ in (0, 1)]
zipped = zip(exp, obs_demuxed_seqs)
for (sample_id, barcode), (filename, _) in zipped:
filename = str(filename)
self.assertTrue(sample_id in filename)
self.assertTrue(barcode in filename)
def assert_untrimmed_results(self, exp, obs_untrimmed_art):
obs_untrimmed = obs_untrimmed_art.view(
MultiplexedPairedEndBarcodeInSequenceDirFmt)
obs_untrimmed_f = obs_untrimmed.forward_sequences.view(FastqGzFormat)
obs_untrimmed_f = gzip.decompress(obs_untrimmed_f.path.read_bytes())
self.assertEqual(exp[0], obs_untrimmed_f)
obs_untrimmed_r = obs_untrimmed.reverse_sequences.view(FastqGzFormat)
obs_untrimmed_r = gzip.decompress(obs_untrimmed_r.path.read_bytes())
self.assertEqual(exp[1], obs_untrimmed_r)
def setUp(self):
super().setUp()
self.demux_paired_fn = self.plugin.methods['demux_paired']
muxed_sequences_f_fp = self.get_data_path('forward.fastq.gz')
muxed_sequences_r_fp = self.get_data_path('reverse.fastq.gz')
with tempfile.TemporaryDirectory() as temp:
shutil.copy(muxed_sequences_f_fp, temp)
shutil.copy(muxed_sequences_r_fp, temp)
self.muxed_sequences = Artifact.import_data(
'MultiplexedPairedEndBarcodeInSequence', temp)
# Just one proof-of-concept test here - the single-end test suite
# covers the edge cases.
def test_typical(self):
metadata = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='Barcode',
index=pd.Index(['sample_a', 'sample_b'], name='id')))
with redirected_stdio(stderr=os.devnull):
obs_demuxed_art, obs_untrimmed_art = \
self.demux_paired_fn(self.muxed_sequences, metadata)
self.assert_demux_results(metadata.to_series(), obs_demuxed_art)
exp_untrimmed = [b'@id6\nGGGGACGTACGT\n+\nzzzzzzzzzzzz\n',
b'@id6\nTTTTTGCATGCA\n+\nzzzzzzzzzzzz\n']
self.assert_untrimmed_results(exp_untrimmed, obs_untrimmed_art)
def test_di_typical(self):
forward_barcodes = CategoricalMetadataColumn(
pd.Series(['AAAA', 'CCCC'], name='ForwardBarcode',
index= | pd.Index(['sample_a', 'sample_b'], name='id') | pandas.Index |
"""
"""
import io
import os
import pandas as pd
import numpy as np
from datetime import datetime
import yaml
import tethys_utils as tu
import logging
from time import sleep
from pyproj import Proj, CRS, Transformer
pd.options.display.max_columns = 10
#############################################
### Parameters
base_path = os.path.realpath(os.path.dirname(__file__))
permit_csv = os.path.join(base_path, 'es_water_permit_data_v02.csv')
sd_csv = os.path.join(base_path, 'es_stream_depletion_details.csv')
with open(os.path.join(base_path, 'parameters-permits.yml')) as param:
param = yaml.safe_load(param)
conn_config = param['remote']['connection_config']
bucket = param['remote']['bucket']
base_key = 'es/{name}.csv'
run_date = pd.Timestamp.today(tz='utc').round('s')
# run_date_local = run_date.tz_convert(ts_local_tz).tz_localize(None).strftime('%Y-%m-%d %H:%M:%S')
run_date_key = run_date.strftime('%Y%m%dT%H%M%SZ')
def read_s3_csv(s3, bucket, key):
"""
"""
resp = s3.get_object(Bucket=bucket, Key=key)
body1 = resp['Body'].read().decode()
s_io = io.StringIO(body1)
csv1 = pd.read_csv(s_io)
return csv1
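def _example_read_permits(s3_client, bucket_name='example-bucket'):
    """Hedged usage sketch for read_s3_csv: assumes a boto3-style client whose get_object()
    returns a dict with a readable 'Body'. The bucket and key names here are placeholders."""
    return read_s3_csv(s3_client, bucket_name, 'es/example_permits.csv')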
use_type_mapping = {'Dairying - Cows': 'irrigation', 'Water Supply - Rural': 'water_supply', 'Pasture Irrigation': 'irrigation', 'Crop Irrigation': 'irrigation', 'Stock Yard': 'stockwater', 'Water Supply - Town': 'water_supply', 'Quarrying': 'other', 'Recreational': 'other', 'Gravel extraction': 'other', 'Hydro-electric power generation': 'hydro_electric', 'Food Processing': 'other', 'Meat works': 'other', 'Tourism': 'other', 'Mining works': 'other', 'Industrial': 'other', 'Domestic': 'water_supply', 'Timber Processing incl Sawmills': 'other', 'Peat Harvesting/Processing': 'other', 'Milk and dairy industries': 'other', 'Gravel Wash': 'other'}
###########################################
#### Process csv files
### Permit file
permit1 = | pd.read_csv(permit_csv) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-12-12 15:00:35
# @Author : <NAME> (<EMAIL>)
# @Link : github.com/taseikyo
# @Version : python3.5
"""
obtain video information that exceeds the play threshold
"""
import os
import sys
import csv
import requests
import pandas as pd
PLAY_THRESHOLD = 500
def obtain_video_play_info(mid: str, page: int = 1) -> None:
"""
obtain upper's ($mid) video information
that exceeds the $PLAY_THRESHOLD
"""
url = "https://api.bilibili.com/x/space/arc/search"
payloads = {
"mid": mid,
"ps": 30,
"tid": 0,
"pn": page,
"keyword": "",
"order": "pubdate",
"jsonp": "jsonp",
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3904.97"
}
r = requests.get(url, headers=headers, params=payloads)
videos = r.json()["data"]["list"]["vlist"]
if not videos:
return
video_list = []
for video in videos:
video_aid = video["aid"]
video_title = video["title"]
video_play = video["play"]
video_description = video["description"].replace("\n", " || ")
video_created = video["created"]
if video_play >= PLAY_THRESHOLD:
video_list.append(
[video_aid, video_title, video_play, video_description, video_created]
)
print(video_title, video_play)
if video_list:
dump(mid, video_list, page)
obtain_video_play_info(mid, page + 1)
def dump(mid: str, data: list, page: int) -> None:
"""
save $data as csv named $mid_$page.csv
"""
print(f"begin to dump @{mid}'data page {page}...")
with open(f"{mid}_{page}.csv", "w", encoding="utf-8", newline="") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(
[
"video_aid",
"video_title",
"video_play",
"video_description",
"video_created",
]
)
writer.writerows(data)
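def _example_dump():
    """Hedged example of dump(): writes a tiny CSV for a made-up uploader id (mid).
    The aid, title, play count, description and timestamp below are fabricated samples."""
    sample_rows = [[12345678, "demo title", 600, "demo description", 1544601600]]
    dump("123456", sample_rows, 1)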
def merge(mid: str) -> None:
"""
merge all $mid_*.csv to $mid.csv
"""
merge_list = []
files = os.listdir()
for file in files:
if file.startswith(f"{mid}"):
merge_list.append(file)
if len(merge_list) == 1:
os.rename(merge_list[0], f"{mid}.csv")
return
df1 = pd.read_csv(merge_list[0], encoding="utf-8")
for file in merge_list[1:]:
df2 = pd.read_csv(file, encoding="utf-8")
df1 = | pd.concat([df1, df2], axis=0, ignore_index=True, sort=False) | pandas.concat |
# coding=utf-8
"""
Data-source parsing module and parser-class implementations for the bundled example data sources
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import pandas as pd
from .ABuSymbol import EMarketTargetType
from ..CoreBu.ABuFixes import six
from ..UtilBu import ABuDateUtil
__author__ = '阿布'
__weixin__ = 'abu_quant'
def del_columns(df, columns):
"""
    Delete the entire columns specified by the ``columns`` argument from df
    :param df: financial time-series pd.DataFrame object
    :param columns: iterable of strings naming the columns to delete
:return:
"""
old_c = df.columns.tolist()
for col in filter(lambda x: x in old_c, columns):
df.drop(col, axis=1, inplace=True)
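def _example_del_columns():
    """Hedged usage sketch of del_columns: drops columns in place and silently ignores
    names that are not present. The demo frame below is made up."""
    demo = pd.DataFrame({'open': [1.0, 2.0], 'close': [1.1, 2.1], 'tmp': [0, 1]})
    del_columns(demo, ['tmp', 'not_there'])
    return demo  # remaining columns: open, close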
class AbuDataParseWrap(object):
"""
    Class decorator that wraps and replaces __init__ to unify the data-parsing workflow
"""
    def __call__(self, cls):
        """Only used as a decorator for data-source parser classes; wraps the common parsing conventions and flow"""
if isinstance(cls, six.class_types):
            # only meant to be used as a class decorator
init = cls.__init__
def wrapped(*args, **kwargs):
try:
                    # grab the decorated instance's self object
warp_self = args[0]
warp_self.df = None
                    # call the original init
init(*args, **kwargs)
symbol = args[1]
                    # start parsing the data
self._gen_warp_df(warp_self, symbol)
except Exception as e:
logging.exception(e)
            # replace the original init with wrapped
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
            # assign the original init to deprecated_original; this exact attribute name is required
            # because other code, e.g. AbuParamBase, looks it up to find the original method
wrapped.deprecated_original = init
return cls
else:
raise TypeError('AbuDataParseWrap just for class warp')
# noinspection PyMethodMayBeStatic
def _gen_warp_df(self, warp_self, symbol):
"""
        Wraps the common data-parsing conventions and flow
        :param warp_self: the self object used inside the wrapped class's init
        :param symbol: the requested symbol str object
:return:
"""
        # the original init function is required to have added the following attributes to the class
must_col = ['open', 'close', 'high', 'low', 'volume', 'date']
        # check that all required attributes are present
all_has = all([hasattr(warp_self, col) for col in must_col])
# raise RuntimeError('df.columns must have |date|open|close|high|volume| ')
if all_has:
            # convert the time sequence to pandas datetimes
dates_pd = pd.to_datetime(warp_self.date)
            # build the df, using dates_pd as the index
warp_self.df = pd.DataFrame(index=dates_pd)
for col in must_col:
                # assign each required class-attribute sequence to a df column
warp_self.df[col] = getattr(warp_self, col)
            # shift the close-price sequence to derive the previous-close sequence
warp_self.df['pre_close'] = warp_self.df['close'].shift(1)
warp_self.df['pre_close'].fillna(warp_self.df['open'], axis=0, inplace=True)
            # add the integer date column
warp_self.df['date'] = warp_self.df['date'].apply(lambda x: ABuDateUtil.date_str_to_int(str(x)))
            # add the weekday column date_week, values 0-4 for Monday through Friday
warp_self.df['date_week'] = warp_self.df['date'].apply(
lambda x: ABuDateUtil.week_of_date(str(x), '%Y%m%d'))
            # type conversions
warp_self.df['close'] = warp_self.df['close'].astype(float)
warp_self.df['high'] = warp_self.df['high'].astype(float)
warp_self.df['low'] = warp_self.df['low'].astype(float)
warp_self.df['open'] = warp_self.df['open'].astype(float)
warp_self.df['volume'] = warp_self.df['volume'].astype(float)
warp_self.df['volume'] = warp_self.df['volume'].astype(np.int64)
warp_self.df['date'] = warp_self.df['date'].astype(int)
warp_self.df['pre_close'] = warp_self.df['pre_close'].astype(float)
            # not computed with df['close'].pct_change
# noinspection PyTypeChecker
warp_self.df['p_change'] = np.where(warp_self.df['pre_close'] == 0, 0,
(warp_self.df['close'] - warp_self.df['pre_close']) / warp_self.df[
'pre_close'] * 100)
warp_self.df['p_change'] = warp_self.df['p_change'].apply(lambda x: round(x, 3))
            # give the df a name
warp_self.df.name = symbol
@AbuDataParseWrap()
class TXParser(object):
    """tx data-source parser class, decorated by the class decorator AbuDataParseWrap"""
def __init__(self, symbol, sub_market, json_dict):
"""
        :param symbol: the requested symbol str object
        :param sub_market: sub-market (exchange) type
        :param json_dict: json data returned by the request
"""
if json_dict['code'] == 0:
if symbol.market == EMarketTargetType.E_MARKET_TARGET_US:
data = json_dict['data'][symbol.value + sub_market]
else:
data = json_dict['data'][symbol.value]
if 'qfqday' in data.keys():
data = data['qfqday']
else:
data = data['day']
            # prepare the attribute sequences required by AbuDataParseWrap
            if len(data) > 0:
                # date/time sequence, formatted as strings like 2017-07-26
                self.date = [item[0] for item in data]
                # open price sequence
                self.open = [item[1] for item in data]
                # close price sequence
                self.close = [item[2] for item in data]
                # high price sequence
                self.high = [item[3] for item in data]
                # low price sequence
                self.low = [item[4] for item in data]
                # volume sequence
                self.volume = [item[5] for item in data]
@AbuDataParseWrap()
class NTParser(object):
    """nt data-source parser class, decorated by the class decorator AbuDataParseWrap"""
# noinspection PyUnusedLocal
def __init__(self, symbol, json_dict):
"""
        :param symbol: the requested symbol str object
        :param json_dict: json data returned by the request
"""
data = json_dict['data']
# 为AbuDataParseWrap准备类必须的属性序列
if len(data) > 0:
# 时间日期序列
self.date = [item[0] for item in data]
# 开盘价格序列
self.open = [item[1] for item in data]
# 收盘价格序列
self.close = [item[2] for item in data]
# 最高价格序列
self.high = [item[3] for item in data]
# 最低价格序列
self.low = [item[4] for item in data]
# 成交量序列
self.volume = [item[5] for item in data]
@AbuDataParseWrap()
class SNUSParser(object):
    """snus data-source parser class, decorated by the class decorator AbuDataParseWrap"""
# noinspection PyUnusedLocal
def __init__(self, symbol, json_dict):
"""
        :param symbol: the requested symbol str object
        :param json_dict: json data returned by the request
"""
data = json_dict
# 为AbuDataParseWrap准备类必须的属性序列
if len(data) > 0:
# 时间日期序列
self.date = [item['d'] for item in data]
# 开盘价格序列
self.open = [item['o'] for item in data]
# 收盘价格序列
self.close = [item['c'] for item in data]
# 最高价格序列
self.high = [item['h'] for item in data]
# 最低价格序列
self.low = [item['l'] for item in data]
# 成交量序列
self.volume = [item['v'] for item in data]
@AbuDataParseWrap()
class SNFuturesParser(object):
    """Example futures data-source parser class, decorated by the class decorator AbuDataParseWrap"""
# noinspection PyUnusedLocal
def __init__(self, symbol, json_dict):
"""
        :param symbol: the requested symbol str object
        :param json_dict: json data returned by the request
"""
data = json_dict
# 为AbuDataParseWrap准备类必须的属性序列
if len(data) > 0:
# 时间日期序列
self.date = [item[0] for item in data]
# 开盘价格序列
self.open = [item[1] for item in data]
# 最高价格序列
self.high = [item[2] for item in data]
# 最低价格序列
self.low = [item[3] for item in data]
# 收盘价格序列
self.close = [item[4] for item in data]
# 成交量序列
self.volume = [item[5] for item in data]
@AbuDataParseWrap()
class SNFuturesGBParser(object):
    """Example international-futures data-source parser class, decorated by the class decorator AbuDataParseWrap"""
# noinspection PyUnusedLocal
def __init__(self, symbol, json_dict):
"""
        :param symbol: the requested symbol str object
        :param json_dict: json or dict data returned by the request
"""
data = json_dict
# 为AbuDataParseWrap准备类必须的属性序列
if len(data) > 0:
# 时间日期序列
self.date = [item['date'] for item in data]
# 开盘价格序列
self.open = [item['open'] for item in data]
# 最高价格序列
self.high = [item['high'] for item in data]
# 最低价格序列
self.low = [item['low'] for item in data]
# 收盘价格序列
self.close = [item['close'] for item in data]
# 成交量序列
self.volume = [item['volume'] for item in data]
@AbuDataParseWrap()
class HBTCParser(object):
    """Example crypto-market data-source parser class, decorated by the class decorator AbuDataParseWrap"""
# noinspection PyUnusedLocal
def __init__(self, symbol, json_dict):
"""
        :param symbol: the requested symbol str object
        :param json_dict: json data returned by the request
"""
data = json_dict
# 为AbuDataParseWrap准备类必须的属性序列
if len(data) > 0:
# 时间日期序列
self.date = [item[0] for item in data]
# 开盘价格序列
self.open = [item[1] for item in data]
# 最高价格序列
self.high = [item[2] for item in data]
# 最低价格序列
self.low = [item[3] for item in data]
# 收盘价格序列
self.close = [item[4] for item in data]
# 成交量序列
self.volume = [item[5] for item in data]
            # reformat the dates, converting them to strings like 2017-07-26
self.date = list(map(lambda date: ABuDateUtil.fmt_date(date), self.date))
class BDParser(object):
    """bd data-source parser class"""
data_keys = ['data', 'dataMash']
s_calc_dm = True
def __init__(self, symbol, json_dict):
"""
        Does not use the AbuDataParseWrap decorator: kept as an example of the raw parsing flow.
        The other parser classes all use the AbuDataParseWrap decorator, so the parsing steps here
        are not annotated in detail -- see the AbuDataParseWrap implementation for the details.
        :param symbol: the requested symbol str object
        :param json_dict: json data returned by the request
"""
try:
if BDParser.data_keys[0] in json_dict.keys():
self.data = json_dict[BDParser.data_keys[0]][::-1]
elif BDParser.data_keys[1] in json_dict.keys():
self.data = json_dict[BDParser.data_keys[1]][::-1]
else:
raise ValueError('content not json format')
dates = [mash['date'] for mash in self.data]
klines = [mash['kline'] for mash in self.data]
self.df = None
if len(klines) > 0 and len(dates) > 0:
dates_fmt = list(map(lambda date: ABuDateUtil.fmt_date(date), dates))
dates_pd = pd.to_datetime(dates_fmt)
self.df = | pd.DataFrame(klines, index=dates_pd) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import panel as pn
from patchwork._sample import PROTECTED_COLUMN_NAMES, find_partially_labeled
class SingleImageTagger():
def __init__(self, f, classname="class", size=200):
self.classname = classname
# determine PNG or JPG and build image pane
if f.lower().endswith(".png"):
self._pane = pn.pane.PNG(f, width=size, height=size, sizing_mode="fixed")
elif f.lower().endswith(".jpg") or f.lower().endswith(".jpeg"):
self._pane = pn.pane.JPG(f, width=size, height=size, sizing_mode="fixed")
else:
assert False, "can't determine file format"
# build button
self._button = pn.widgets.Button(name=f"doesn't contain {classname}", width=size)
# build column to hold it
self.panel = pn.Column(self._pane, self._button, background="white")
# attach callback to button
def _callback(*events):
if self.panel.background == "white":
self.panel.background = "blue"
self._button.name = f"contains {self.classname}"
elif self.panel.background == "blue":
self.panel.background = "red"
self._button.name = "EXCLUDE"
else:
self.panel.background = "white"
self._button.name = f"doesn't contain {self.classname}"
self._button.on_click(_callback)
def __call__(self):
# return label: 0, 1, or np.nan
_parse_class = {"white":0, "blue":1, "red":np.nan}
return _parse_class[self.panel.background]
def update_image(self, f, classname=None):
if classname is not None:
self.classname = classname
# update image pane to new image, and reset button
self._pane.object = f
self.panel.background = "white"
self._button.name = f"doesn't contain {self.classname}"
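def _example_single_image_tagger():
    """Hedged usage sketch: build one tagger for a hypothetical image path.
    Display tagger.panel in a notebook; calling tagger() later returns 0, 1 or np.nan."""
    tagger = SingleImageTagger("example_image.png", classname="cat", size=200)
    return tagger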
class QuickTagger():
"""
Barebones panel app for quickly tagging multiclass images. Call QuickTagger.panel
in a notebook to get to the GUI.
Takes inputs in the same form as the active learning tool- use pw.prep_label_dataframe()
to get started.
"""
def __init__(self, df, outfile=None, size=200):
"""
:df: pandas DataFrame containing filepaths to images and a label column
for each category
:outfile: optional; path to CSV to write to during tagging
:size: scaling number for panel GUI
"""
self.df = df.copy()
self.outfile = outfile
self.categories = [c for c in df.columns if c not
in PROTECTED_COLUMN_NAMES]
self._set_up_panel(size)
def _set_up_panel(self, size=200):
init_sample_indices = np.random.choice(np.arange(len(self.df))[pd.isna(self.df[self.categories[0]])], 9)
self._current_indices = init_sample_indices
self._taggers = [SingleImageTagger(self.df.loc[i,"filepath"], self.categories[0], size)
for i in init_sample_indices]
self._grid = pn.GridBox(*[t.panel for t in self._taggers], ncols=3, nrows=3, width=3*size)
self._classchooser = pn.widgets.Select(options=self.categories,
value=self.categories[0], name="Class to annotate", width=size)
self._selectchooser = pn.widgets.Select(options=["all", "partially labeled"], value="all",
name="Sample from", width=size)
self._samplesavebutton = pn.widgets.Button(name="save and sample", button_type="primary", width=size)
self._samplebutton = pn.widgets.Button(name="sample without saving", button_type="danger", width=size)
self._summaries = pn.pane.DataFrame(self._compute_summaries(), index=False, width=size)
self.panel = pn.Row(
self._grid,
pn.Spacer(width=50),
pn.Column(
self._classchooser,
self._selectchooser,
self._samplesavebutton,
self._samplebutton,
self._summaries
)
)
# set up button callbacks
self._samplebutton.on_click(self._sample)
self._samplesavebutton.on_click(self._record_and_sample_callback)
def _sample(self, *events):
cat = self._classchooser.value
        if self._selectchooser.value == "partially labeled":
available_indices = np.arange(len(self.df))[pd.isna(self.df[cat])&find_partially_labeled(self.df)]
else:
available_indices = np.arange(len(self.df))[pd.isna(self.df[cat])]
self._current_indices = np.random.choice(available_indices, size=9, replace=False)
for e,i in enumerate(self._current_indices):
self._taggers[e].update_image(self.df.filepath.values[i], cat)
def _record(self):
for i, t in zip(self._current_indices, self._taggers):
self.df.loc[i, t.classname] = t()
def _record_and_sample_callback(self, *events):
self._record()
self._sample()
self.save()
self._summaries.object = self._compute_summaries()
def _compute_summaries(self):
cats = self.categories
label_counts = []
for c in cats:
summary = {"class":c, "None":np.sum( | pd.isna(self.df[c]) | pandas.isna |
from pathlib import Path
from typing import List, Tuple
import matplotlib.pyplot as plt
import pandas as pd
from pylossmap import BLMData
from pylossmap.lossmap import LossMap
from tqdm.auto import tqdm
def ufo_stable_proton(ufo_meta: pd.DataFrame) -> pd.DataFrame:
ufo_meta = ufo_meta[ufo_meta["beam_mode"] == "STABLE"]
ufo_meta = ufo_meta[ufo_meta["particle_b1"] == "protons"]
ufo_meta = ufo_meta[ufo_meta["particle_b2"] == "protons"]
return ufo_meta
def ufo_on_blms(ufo_meta: pd.DataFrame, blms: List[str]) -> pd.DataFrame:
"""Only keep ufos which occur on the provided blms.
Args:
ufo_meta: metadata of the ufo events
blms: keep ufos which occur on these blms
Returns:
The filtered ufo metadata.
"""
blms_in_ufo = list(set(blms) & set(ufo_meta["blm"].unique()))
ufo_meta = ufo_meta.set_index("blm").loc[blms_in_ufo].reset_index()
return ufo_meta
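def _example_ufo_on_blms():
    """Hedged example of ufo_on_blms using a tiny made-up metadata frame and BLM names."""
    meta = pd.DataFrame({"blm": ["BLM.A", "BLM.B", "BLM.C"], "fill": [1, 2, 3]})
    return ufo_on_blms(meta, blms=["BLM.A", "BLM.C"])  # keeps only the BLM.A and BLM.C rows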
def load_raw_fill(file_path: Path) -> BLMData:
"""Load the raw blm data.
Args:
file_path: the path to the hdf file
Returns:
The raw BLM data.
"""
blm_data = BLMData.load(file_path)
blm_data.df.drop_duplicates(inplace=True)
return blm_data
def get_ufo_data(ufo_meta: pd.DataFrame, raw_data_dir: Path) -> pd.DataFrame:
"""Load the ufo event blm data.
Args:
ufo_meta: metadata of the ufo events
raw_data_dir: directory containing the raw blm data
Returns:
The raw blm data.
"""
ufo_blm_data = []
for idx, row in tqdm(ufo_meta.reset_index().iterrows(), total=len(ufo_meta)):
# try:
print(idx, row.datetime, row.blm, row.fill)
try:
blm_data = load_raw_fill(raw_data_dir / f"{row.fill}.h5")
except FileNotFoundError:
print(f"file not found {row.fill}")
continue
loss_map = blm_data.loss_map(row.datetime + pd.Timedelta("1s"))
ufo_blm_data.append(loss_map.df["data"])
return pd.DataFrame(ufo_blm_data).reset_index(drop=True)
def ufo_above_threshold(
ufo_meta: pd.DataFrame,
raw_data_dir: Path,
dcum_range: int = 3000,
blm_threshold: float = 1e-3,
n_above: int = 2,
) -> pd.DataFrame:
"""Keep ufo event which have `n_above` blms within `dcum_range` above `blm_threshold`.
Args:
ufo_meta: metadata of the ufos
raw_data_dir: directory containing the raw blm data
dcum_range: +- distance wwithin which to look for neighboring high blms
blm_threshold: blm amplitude threshold
n_above: how many neighboring blms should be above `blm_threshold`
Returns:
The metadata of the ufo events which pass the conditions.
"""
keep_ufo_idx = []
for fill, fill_grp in tqdm(
ufo_meta.reset_index().sort_values("datetime").groupby("fill")
):
try:
blm_data_fill = load_raw_fill(raw_data_dir / f"{fill}.h5")
except FileNotFoundError:
print(f"file not found {fill}")
continue
for idx, ufo in fill_grp.iterrows():
# print(idx)
blm_data_lm = blm_data_fill.loss_map(ufo.datetime + pd.Timedelta("1s"))
blm_data_around = blm_data_lm.df[
(blm_data_lm.df["dcum"] >= ufo.dcum - dcum_range)
& (blm_data_lm.df["dcum"] <= ufo.dcum + dcum_range)
]
if (blm_data_around["data"] >= blm_threshold).sum() >= n_above:
keep_ufo_idx.append(idx)
else:
print(f"{idx} does not pass threshold check.")
return ufo_meta.iloc[keep_ufo_idx]
def plot_ufos(ufo_meta: pd.DataFrame, raw_data_dir: Path):
"""Plot the ufos for all the ufo events in `ufo_meta`.
Args:
ufo_meta: metadata of the ufos
raw_data_dir: directory containing the raw blm data
"""
for idx, row in tqdm(ufo_meta.reset_index().iterrows(), total=len(ufo_meta)):
try:
ufo_fill_data = load_raw_fill(raw_data_dir / f"{row.fill}.h5")
ufo_data = ufo_fill_data.loss_map(row.datetime + | pd.Timedelta("1s") | pandas.Timedelta |
import requests
import pandas as pd
from datetime import timedelta
import numpy as np
def ba_timezone(ba, format):
"""
Retrieves the UTC Offset (for standard time) for each balancing area.
"""
offset_dict = {'AEC': 6,
'AECI': 6,
'AVA': 8,
'AVRN': 8,
'AZPS': 7,
'BANC': 8,
'BPAT': 8,
'BPA': 8,
'CHPD': 8,
'CISO': 8,
'CAISO': 8,
'CPLE': 5,
'CPLW': 5,
'DEAA': 7,
'DOPD': 8,
'DUK': 5,
'EEI': 6,
'EPE': 7,
'ERCO': 6,
'FMPP': 5,
'FPC': 5,
'FPL': 5,
'GCPD': 8,
'GRID': 8,
'GRIF': 7,
'GVL': 5,
'GWA': 7,
'HGMA': 7,
'HST': 5,
'IID': 8,
'IPCO': 8,
'ISNE': 5,
'ISONE': 5,
'JEA': 5,
'LDWP': 8,
'LGEE': 5,
'MISO': 5,
'NEVP': 8,
'NSB': 5,
'NWMT': 7,
'NYIS': 5,
'NYISO': 5,
'PACE': 7,
'PACW': 8,
'PGE': 8,
'PJM': 5,
'PNM': 7,
'PSCO': 7,
'PSEI': 8,
'SC': 5,
'SCEG': 5,
'SCL': 8,
'SEC': 5,
'SEPA': 6,
'SOCO': 6,
'SPA': 6,
'SRP': 7,
'SWPP': 6,
'SPP': 6,
'TAL': 5,
'TEC': 5,
'TEPC': 7,
'TIDC': 8,
'TPWR': 8,
'TVA': 6,
'WACM': 7,
'WALC': 7,
'WAUW': 7,
'WWA': 7,
'YAD': 5,
'EIA.CISO': 8,
'EIA.ISNE': 5,
'EIA.PJM': 5,
'EIA.NYIS': 5,
'EIA.SWPP': 6,
'EIA.MISO': 5,
'EIA.BPAT': 8,}
offset = offset_dict[ba]
if format == 'ISO':
timezone = f'-0{offset}:00'
elif format == 'GMT':
timezone = f'Etc/GMT+{offset}'
return timezone
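def _example_ba_timezone():
    """Hedged usage sketch of ba_timezone; the two return values follow the offset table above."""
    iso = ba_timezone('CAISO', format='ISO')   # '-08:00'
    gmt = ba_timezone('MISO', format='GMT')    # 'Etc/GMT+5'
    return iso, gmt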
def check_singularity_region_exists(api_key, ba):
# define parameters for API call
event_type = 'carbon_intensity'
header = {'X-Api-Key': api_key}
def download_singularity_data(api_key, ba, start_date, end_date):
"""
"""
# set the start and end datetimes
utc_offset = ba_timezone(ba, format='ISO')
start_datetime = | pd.to_datetime(f'{start_date}T00:00:00{utc_offset}') | pandas.to_datetime |
# %matplotlib notebook
import pandas as pd
import numpy as np
import seaborn as sns
sns.set(color_codes=True)
from sklearn import preprocessing
# from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import SMOTE,RandomOverSampler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,confusion_matrix
import matplotlib.pyplot as plt
import scipy
from sklearn.decomposition import PCA
from scipy.stats import pearsonr
import os
# import boto3
import time
from bokeh.events import Tap
# import utils.visualization
# annot_df_2['rep']
blackListFeatures=[
'Nuclei_Correlation_Manders_AGP_DNA',
'Nuclei_Correlation_Manders_AGP_ER',
'Nuclei_Correlation_Manders_AGP_Mito',
'Nuclei_Correlation_Manders_AGP_RNA',
'Nuclei_Correlation_Manders_DNA_AGP',
'Nuclei_Correlation_Manders_DNA_ER',
'Nuclei_Correlation_Manders_DNA_Mito',
'Nuclei_Correlation_Manders_DNA_RNA',
'Nuclei_Correlation_Manders_ER_AGP',
'Nuclei_Correlation_Manders_ER_DNA',
'Nuclei_Correlation_Manders_ER_Mito',
'Nuclei_Correlation_Manders_ER_RNA',
'Nuclei_Correlation_Manders_Mito_AGP',
'Nuclei_Correlation_Manders_Mito_DNA',
'Nuclei_Correlation_Manders_Mito_ER',
'Nuclei_Correlation_Manders_Mito_RNA',
'Nuclei_Correlation_Manders_RNA_AGP',
'Nuclei_Correlation_Manders_RNA_DNA',
'Nuclei_Correlation_Manders_RNA_ER',
'Nuclei_Correlation_Manders_RNA_Mito',
'Nuclei_Correlation_RWC_AGP_DNA',
'Nuclei_Correlation_RWC_AGP_ER',
'Nuclei_Correlation_RWC_AGP_Mito',
'Nuclei_Correlation_RWC_AGP_RNA',
'Nuclei_Correlation_RWC_DNA_AGP',
'Nuclei_Correlation_RWC_DNA_ER',
'Nuclei_Correlation_RWC_DNA_Mito',
'Nuclei_Correlation_RWC_DNA_RNA',
'Nuclei_Correlation_RWC_ER_AGP',
'Nuclei_Correlation_RWC_ER_DNA',
'Nuclei_Correlation_RWC_ER_Mito',
'Nuclei_Correlation_RWC_ER_RNA',
'Nuclei_Correlation_RWC_Mito_AGP',
'Nuclei_Correlation_RWC_Mito_DNA',
'Nuclei_Correlation_RWC_Mito_ER',
'Nuclei_Correlation_RWC_Mito_RNA',
'Nuclei_Correlation_RWC_RNA_AGP',
'Nuclei_Correlation_RWC_RNA_DNA',
'Nuclei_Correlation_RWC_RNA_ER',
'Nuclei_Correlation_RWC_RNA_Mito',
'Nuclei_Granularity_14_AGP',
'Nuclei_Granularity_14_DNA',
'Nuclei_Granularity_14_ER',
'Nuclei_Granularity_14_Mito',
'Nuclei_Granularity_14_RNA',
'Nuclei_Granularity_15_AGP',
'Nuclei_Granularity_15_DNA',
'Nuclei_Granularity_15_ER',
'Nuclei_Granularity_15_Mito',
'Nuclei_Granularity_15_RNA',
'Nuclei_Granularity_16_AGP',
'Nuclei_Granularity_16_DNA',
'Nuclei_Granularity_16_ER',
'Nuclei_Granularity_16_Mito',
'Nuclei_Granularity_16_RNA']
rootDir='/home/ubuntu/bucket/projects/2017_09_27_RareDiseases_Taipale/workspace'
dataset='Set2'
ndd='3'
profType='mean'
normalization=''; #'' for no normaliztion 'n_' for per untransfected cells normalization
scaleMeanProfilesForEachPlate=1;
############################# load annotaations
AnnotSet2 = pd.read_excel(rootDir+'/metadata/Set2Annots20190801.xlsx', sheet_name=None)
print(AnnotSet2.keys())
df_1=AnnotSet2['Replicate_Plates']
df_1['batch']='Maxproj_Replicates_Original_Screen'
df_2=AnnotSet2['Kinase_Mutants']
# df_2=df_2.drop([159])
df_2['batch']='Maxproj_Kinase_Plates'
df_3=AnnotSet2['Common_Variants']
df_3['batch']='Maxproj_Common_Variants'
df_4=AnnotSet2['Cancer_Mutants']
df_4['batch']='Maxproj_Cancer_Mutations_Screen'
annot_df_2 = pd.concat([df_1,df_2,df_3,df_4],axis=0,ignore_index=True)
metaDataPlates=annot_df_2['Metadata_Plate'].unique()
# listOfPlates0=os.listdir(rootDir+'/backend/wellsSingleCells/')[1:]
listOfPlates0=os.listdir(rootDir+'/backend/'+profType+'PerWells'+ndd+'/')
annot_df_2['Metadata_Sample']=annot_df_2['Metadata_Sample'].str.rstrip()
######################### load mean profiles
# listOfPlatesAndWells=[p for p in listOfPlates0 if p.split('df_n_')[1:][0][0:-4] in metaDataPlates]
strProf='df_'+normalization
listOfPlates=[p for p in listOfPlates0 if p.split(strProf)[1:] in metaDataPlates]
# listOfPlates.remove('df_n_Replicate_21')
scaler_Plate= preprocessing.StandardScaler()
df = pd.DataFrame();
for p in listOfPlates: #[0:1]: ['df_RC4_IF_05']:
fileNameToSave=rootDir+'/backend/'+profType+'PerWells'+ndd+'/'+p;
transfectedMeanPerWell= | pd.read_pickle(fileNameToSave, compression='infer') | pandas.read_pickle |
"""
To fix the yield from the UPTSO preprocessed files
both Schwaller & Lowe versions
Keeps all data, no filtration done.
Version: 1.31: 2021-04-16; A.M.
@author: <NAME> (DocMinus)
license: MIT License
Copyright (c) 2021 DocMinus
"""
import pandas as pd
import numpy as np
def deconvolute_yield(row):
text_yield_data = row["TxtYield"]
calc_yield_data = row["CalcYield"]
my_text_yield = 0
my_calc_yield = 0
if 0 < text_yield_data <= 100:
my_text_yield = text_yield_data
if 0 < calc_yield_data <= 100:
my_calc_yield = calc_yield_data
out_yield = my_text_yield
if my_calc_yield > my_text_yield:
out_yield = my_calc_yield
return out_yield
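def _example_deconvolute_yield():
    """Hedged example of deconvolute_yield: the larger in-range (0, 100] yield wins,
    and out-of-range values such as 120 are treated as missing."""
    row = pd.Series({"TxtYield": 120.0, "CalcYield": 87.0})
    return deconvolute_yield(row)  # -> 87.0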
def main():
#
# change these two lines accordingly
#
in_file = "/data/uspto/Lowe_processed_and_yield_curated/1976_Sep2016_USPTOgrants_smiles.rsmi"
out_file = "/data/uspto/Lowe_processed_and_yield_curated/1976_Sep2016_USPTOgrants_smiles_yield_ok_all_data.csv"
print("Reading...")
# read first line only.
with open(in_file) as file_in:
first_line = file_in.readline()
to_skip = 0 # Lowe data set, contains no initial comments
if first_line.startswith("# Original"):
to_skip = 2 # Schwaller data set, contains comments
data = pd.read_csv(in_file, sep="\t", low_memory=False, skiprows=to_skip)
# optional: create your own ID; for example good for use in a database
data["myID"] = np.arange(len(data))
id_prefix = "ID" # change as you like
data["myID"] = data["myID"].apply(lambda x: id_prefix + "{0:0>8}".format(x))
cols = data.columns.tolist() # also optional.
cols = cols[-1:] + cols[:-1] # I prefer the ID to come first
data = data[cols]
# Make temporary copies of the yield columns
# Keep only digits, remove all >, %, etc.
# Easier (for overview) to replace step-wise, too many exceptions become otherwise converted wrong.
# Goal is to keep largest yield
#
# CalculatedYield is simplest to convert
data["CalcYield"] = data["CalculatedYield"].str.rstrip("%")
# TextMinedYield has multiple tricky combos (single regex could solve, but wouldn't be readable)
data["TxtYield"] = data["TextMinedYield"].str.lstrip("~")
data["TxtYield"] = data["TxtYield"].str.rstrip("%")
data["TxtYield"] = data["TxtYield"].str.replace(">=", "", regex=True)
data["TxtYield"] = data["TxtYield"].str.replace(">", "", regex=True)
data["TxtYield"] = data["TxtYield"].str.replace("<", "", regex=True)
# How to treat x to y% is a matter of taste; here it becomes last (i.e. highest value)
    data["TxtYield"] = data["TxtYield"].str.replace(r"\d{1,2}\sto\s", "", regex=True)
data["TxtYield"] = data["TxtYield"].replace(np.nan, 0)
data["CalcYield"] = data["CalcYield"].replace(np.nan, 0)
data["TxtYield"] = | pd.to_numeric(data["TxtYield"], errors="coerce") | pandas.to_numeric |
import numpy as np
import random as rand
import copy
import time
import matplotlib.pyplot as plt
import sys
import os
import pandas as pd
from .toric_model import *
from .util import Action
from .mcmc import *
from .toric_model import Toric_code
from matplotlib import rc
#rc('font',**{'family':'sans-serif'})#,'sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif'})#,'serif':['Palatino']})
rc('text', usetex=True)
# geometric mean of array of numbers: series
def geom_mean(series):
array=series.to_numpy()
return np.exp(np.average(np.log(array)))
# geometric std of array of numbers: series
def geom_std(series):
array=series.to_numpy()
return np.exp(np.std(np.log(array)))
# "maximum distance" (largest absolute elementwise difference) for arrays of numbers a & b
def tvd(a, b):
nonzero = np.logical_and(a != 0, b != 0)
if np.any(nonzero):
return np.amax(np.absolute(a - b))
else:
return -1
# Kullback-Leibler distance for arrays of numbers a & b
def kld(a, b):
nonzero = np.logical_and(a != 0, b != 0)
if np.any(nonzero):
log = np.log2(np.divide(a, b, where=nonzero), where=nonzero)
return np.sum((a - b) * log, where=nonzero)
else:
return -1
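def _example_distance_measures():
    """Hedged example of tvd and kld on two small made-up distributions."""
    a = np.array([0.5, 0.3, 0.2])
    b = np.array([0.4, 0.4, 0.2])
    return tvd(a, b), kld(a, b)  # largest absolute difference, and the base-2 KL-style distance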
# generates datafile to determine optimal number of chains in parallel tempering, Nc
# returns three columns with, Nc, time for convergence and steps for convergence
def Nc_tester(file_path, Nc_interval=[3,31]):
# set parameters for parallel tempering
size = 5
p_error = 0.15
SEQ = 8
Nc = 9
TOPS = 10
tops_burn = 5
steps = 1000
eps = 0.008
iters = 10
conv_criteria='error_based'
# create dataframe
stats = pd.DataFrame(columns=['Nc', 'time', 'steps'])
# Number of times every parameter configuration is tested
pop = 10
# create array of toric_codes to be evaluated
t_list = []
for i in range(pop):
t = Toric_code(5)
t.generate_random_error(p_error)
t_list.append(t)
# k=# different Nc-values to be tested
k=(Nc_interval[1] - Nc_interval[0]) / 2 + 1
    # for each Nc-value run parallel_tempering with set parameters and add Nc, time, steps to file
for Nc in range(Nc_interval[0], Nc_interval[1], 2):
print('Nc =', Nc, '/ ', Nc_interval[1])
for pt in range(pop):
t1 = time.time()
_, conv_step = parallel_tempering_plus(copy.deepcopy(t_list[pt]), Nc=Nc, p=p_error, SEQ=SEQ, TOPS=TOPS, tops_burn=tops_burn, eps=eps, steps=steps, iters=iters, conv_criteria=conv_criteria)
delta_t = time.time() - t1
tmp_dict = {'Nc': Nc, 'time': delta_t, 'steps': conv_step}
stats = stats.append(tmp_dict, ignore_index=True)
stats.to_pickle(file_path)
# Reads files generated by Nc_tester, aggregates some statistics and plots data
def Nc_visuals(files=6, SEQ=20):
# File name. Should be built properly if correct files and SEQ is provided as argument
file_base = 'output/Nc_data_SEQ{}_'.format(SEQ)
# Initiate DataFrame to hold data from files
stats = pd.DataFrame(columns=['Nc', 'time', 'steps'])
# Read data from files into "stats"
for i in range(files):
df = pd.read_pickle(file_base + str(i) + '.xz')
stats = pd.concat([stats, df])
# List of statistics to calculate
agg_stats = ['Nc', 'time_mean', 'time_std', 'steps_mean', 'steps_std']
#DataFrame to hold aggregate statistics
agg_data = pd.DataFrame(columns = agg_stats)
# Unique tested Nc values
Nc_values = np.unique(stats['Nc'].to_numpy())
# Number of data points
tot_pts = stats.shape[0]
# Number of different Nc values
Nc_pts = Nc_values.size
# Number of samples per (SEQ, tol) pair
pop = int(tot_pts / Nc_pts)
# steps is -1 if nonconverged. replace with max no of steps (usually 1e6)
stats['steps'].replace(-1, 1e6, inplace=True)
for Nc in Nc_values:
# Window of points for current Nc value
window = stats[stats['Nc'] == Nc].copy()
# Calculate step time
window['steptime'] = window['time'] / window['steps']
# Mean and std values over converged runs
agg = window.agg(['mean', 'std', geom_mean, geom_std])
# number of converged runs
nbr_converged = window[window['steps'] != 1e6].shape[0]
# temporary dict to append to aggregated data
tmp_dict = {'Nc': Nc, 'nbr_converged': nbr_converged}
for name in ['time', 'steps', 'steptime']:
for op in ['mean', 'std']:
tmp_dict[name + '_' + op] = agg.loc[op][name]
# append aggregate data for current Nc value to agg_data
agg_data = agg_data.append(tmp_dict, ignore_index=True)
# Numpy arrays to hold mean convergence time and standard error
time = agg_data['time_mean'].to_numpy()
time_err = agg_data['time_std'] / np.sqrt(pop)
    # Set the matplotlib framing to look good on my computer. Might need tweaks on yours
left = 0.12
plt.rcParams.update({'font.size': 48, 'figure.subplot.top': 0.91, 'figure.subplot.bottom': 0.18, 'figure.subplot.left': left, 'figure.subplot.right': 1 - left})
plt.rc('axes', labelsize=60)
# Choose line color from appropriate colormap (here cubehelix)
c = plt.cm.cubehelix(0.2)
# Initiate figure and axes
fig = plt.figure()
ax = fig.add_subplot(111)
    # Plot mean convergence time as a function of Nc, with error bars
ax.errorbar(Nc_values, time, yerr=time_err, fmt='-', ms=24, lw=6, capsize=12, capthick=6, c=c)
# Set and adjust x and y label of plot
pad = 20.0
ax.set_xlabel('\\texttt{Nc}', labelpad=pad)
    ax.set_ylabel('Convergence time [s]', labelpad=pad)
# Set axis limits
ax.set_ylim([0, 110])
ax.set_xlim([2, 32])
# Make title
ax.set_title('SEQ = {}'.format(SEQ), pad=pad)
    # Add rectangular grid
ax.grid(True, axis='both')
plt.show()
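# Example invocation (assumes six files named 'output/Nc_data_SEQ20_0.xz' ...
# 'output/Nc_data_SEQ20_5.xz' produced by Nc_tester runs):
# Nc_visuals(files=6, SEQ=20)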
# Tests maximum distance and KLD for the convergence criterion 'error_based' and saves the results to a file
def convergence_tester(file_path):
size = 5
p_error = 0.15
Nc = 19
TOPS=20
tops_burn=10
steps=1000000
# Number of times every parameter configuration is tested
pop = 10
# choose criteria to test. Possible values are: 'error_based', 'distr_based', 'majority_based', 'tvd_based', 'kld_based'
crit = 'error_based'
# List of SEQ values to test
SEQ_list = [20, 25, 30, 35, 40]
# List of eps values to test
eps_list = [2e-3*i for i in range(1, 6)]
# Initiate DataFrame to hold tested and resulting parameters
crits_stats = pd.DataFrame(columns=['SEQ', 'eps', 'kld', 'tvd', 'steps'])
# runs parallel tempering for each SEQ and eps-value and saves the data
for SEQ in SEQ_list:
for eps in eps_list:
for j in range(pop):
# Initiate toric code to run parallel tempering on
init_toric = Toric_code(size)
init_toric.generate_random_error(p_error)
# Run parallel_tempering_analysis and collect resulting parameters
[distr, eq, eq_full, chain0, burn_in, crits_distr] = parallel_tempering_analysis(init_toric, Nc, p=p_error, TOPS=TOPS, SEQ=SEQ, tops_burn=tops_burn, steps=steps, conv_criteria=[crit], eps=eps)
# Convert distribution from integer percentage to float in (0, 1)
distr = np.divide(distr.astype(np.float), 100)
                # Calculate total variational and Kullback-Leibler distance between converged distribution and final distribution
tvd_crit = tvd(distr, crits_distr[crit][0])
kld_crit = kld(distr, crits_distr[crit][0])
# Create dictionary and append it to the DataFrame
tmp_dict = {'SEQ': SEQ, 'eps': eps, 'kld': kld_crit, 'tvd': tvd_crit, 'steps': crits_distr[crit][1]}
crits_stats = crits_stats.append(tmp_dict, ignore_index=True)
    # Save the DataFrame with the specified file name
crits_stats.to_pickle(file_path)
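# Example invocation (hypothetical path; conv_test_visuals below presumably
# reads files following the 'output/conv_data_<i>.xz' pattern):
# convergence_tester('output/conv_data_0.xz')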
# Calculate and plot statistics on data generated by convergence_tester()
def conv_test_visuals():
# Number of files to read
files = 100
# Basic file path and base name
file_base = 'output/conv_data_'
    # Initiate DataFrame to hold data read from files
stats = | pd.DataFrame(columns=['SEQ', 'eps', 'kld', 'tvd', 'steps']) | pandas.DataFrame |
__author__ = "saeedamen" # <NAME>
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on a "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import pandas
import collections
from findatapy.timeseries.calculations import Calculations
class RetStats(object):
"""Calculating return statistics of a time series
"""
def __init__(self, returns_df=None, ann_factor=None, resample_freq=None):
self._returns_df = returns_df
self._ann_factor = ann_factor
self._resample_freq = resample_freq
self._rets = None
self._vol = None
self._inforatio = None
self._kurtosis = None
self._dd = None
self._yoy_rets = None
def split_into_dict(self):
"""If we have multiple columns in our returns, we can opt to split up
        the RetStats object into a dictionary of smaller RetStats objects, one
        for each asset's returns
Returns
-------
dict
Dictionary of RetStats objects
"""
ret_stats_dict = collections.OrderedDict()
for d in self._returns_df.columns:
returns_df = | pandas.DataFrame(self._returns_df[d]) | pandas.DataFrame |
import os
from os.path import join as pjoin
import numpy as np
import pandas as pd
import scipy.stats
import dask
from cesium import featurize
from cesium.tests.fixtures import (sample_values, sample_ts_files,
sample_featureset)
import numpy.testing as npt
import pytest
DATA_PATH = pjoin(os.path.dirname(__file__), "data")
FEATURES_CSV_PATH = pjoin(DATA_PATH, "test_features_with_targets.csv")
def test_featurize_files_function(tmpdir):
"""Test featurize function for on-disk time series"""
with sample_ts_files(size=4, labels=['A', 'B']) as ts_paths:
fset, labels = featurize.featurize_ts_files(ts_paths,
features_to_use=["std_err"],
scheduler=dask.get)
assert "std_err" in fset
assert fset.shape == (4, 1)
npt.assert_array_equal(labels, ['A', 'B', 'A', 'B'])
def test_featurize_time_series_single():
"""Test featurize wrapper function for single time series"""
t, m, e = sample_values()
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert fset['amplitude'].values.dtype == np.float64
def test_featurize_time_series_single_multichannel():
"""Test featurize wrapper function for single multichannel time series"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_multiple():
"""Test featurize wrapper function for multiple time series"""
n_series = 5
list_of_series = [sample_values() for i in range(n_series)]
times, values, errors = [list(x) for x in zip(*list_of_series)]
features_to_use = ['amplitude', 'std_err']
meta_features = [{'meta1': 0.5}] * n_series
fset = featurize.featurize_time_series(times, values, errors,
features_to_use,
meta_features, scheduler=dask.get)
npt.assert_array_equal(sorted(fset.columns.get_level_values('feature')),
['amplitude', 'meta1', 'std_err'])
def test_featurize_time_series_multiple_multichannel():
"""Test featurize wrapper function for multiple multichannel time series"""
n_series = 5
n_channels = 3
list_of_series = [sample_values(channels=n_channels)
for i in range(n_series)]
times, values, errors = [list(x) for x in zip(*list_of_series)]
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(times, values, errors,
features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_uneven_multichannel():
"""Test featurize wrapper function for uneven-length multichannel data"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
t = [[t, t[0:-5], t[0:-10]]]
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
e = [[e[0], e[1][0:-5], e[2][0:-10]]]
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_custom_functions():
"""Test featurize wrapper function for time series w/ custom functions"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err', 'test_f']
meta_features = {'meta1': 0.5}
custom_functions = {'test_f': lambda t, m, e: np.pi}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features,
custom_functions=custom_functions,
scheduler=dask.get)
npt.assert_array_equal(fset['test_f', 0], np.pi)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_custom_dask_graph():
"""Test featurize wrapper function for time series w/ custom dask graph"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err', 'test_f', 'test_meta']
meta_features = {'meta1': 0.5}
custom_functions = {'test_f': (lambda x: x.min() - x.max(), 'amplitude'),
'test_meta': (lambda x: 2. * x, 'meta1')}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features,
custom_functions=custom_functions,
scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert ('test_f', 0) in fset.columns
assert ('test_meta', 0) in fset.columns
def test_featurize_time_series_default_times():
"""Test featurize wrapper function for time series w/ missing times"""
n_channels = 3
_, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {}
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
e = [[e[0], e[1][0:-5], e[2][0:-10]]]
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
m = m[0][0]
e = e[0][0]
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
def test_featurize_time_series_default_errors():
"""Test featurize wrapper function for time series w/ missing errors"""
n_channels = 3
t, m, _ = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {}
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
t = [[t, t[0:-5], t[0:-10]]]
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
t = t[0][0]
m = m[0][0]
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
def test_featurize_time_series_pandas_metafeatures():
"""Test featurize function for metafeatures passed as Series/DataFrames."""
t, m, e = sample_values()
features_to_use = ['amplitude', 'std_err']
meta_features = | pd.Series({'meta1': 0.5}) | pandas.Series |
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
self.ragged = DataFrame({"B": range(5)})
self.ragged.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
def test_doc_string(self):
df = DataFrame(
{"B": [0, 1, 2, np.nan, 4]},
index=[
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
],
)
df
df.rolling("2s").sum()
def test_invalid_window_non_int(self):
# not a valid freq
msg = "passed window foobar is not compatible with a datetimelike index"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="foobar")
# not a datetimelike index
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
self.regular.reset_index().rolling(window="foobar")
@pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
def test_invalid_window_nonfixed(self, freq):
# non-fixed freqs
msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
def test_valid_window(self, freq):
self.regular.rolling(window=freq)
@pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
def test_invalid_minp(self, minp):
# non-integer min_periods
msg = (
r"local variable 'minp' referenced before assignment|"
"min_periods must be an integer"
)
with pytest.raises(ValueError, match=msg):
self.regular.rolling(window="1D", min_periods=minp)
def test_invalid_center_datetimelike(self):
# center is not implemented
msg = "center is not implemented for datetimelike and offset based windows"
with pytest.raises(NotImplementedError, match=msg):
self.regular.rolling(window="1D", center=True)
def test_on(self):
df = self.regular
# not a valid column
msg = (
r"invalid on specified as foobar, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling(window="2s", on="foobar")
# column is valid
df = df.copy()
df["C"] = date_range("20130101", periods=len(df))
df.rolling(window="2d", on="C").sum()
# invalid columns
msg = "window must be an integer"
with pytest.raises(ValueError, match=msg):
df.rolling(window="2d", on="B")
# ok even though on non-selected
df.rolling(window="2d", on="C").B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
assert df.A.is_monotonic
df.rolling("2s", on="A").sum()
df = df.set_index("A")
assert df.index.is_monotonic
df.rolling("2s").sum()
def test_non_monotonic_on(self):
# GH 19248
df = DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
)
df = df.set_index("A")
non_monotonic_index = df.index.to_list()
non_monotonic_index[0] = non_monotonic_index[3]
df.index = non_monotonic_index
assert not df.index.is_monotonic
msg = "index must be monotonic"
with pytest.raises(ValueError, match=msg):
df.rolling("2s").sum()
df = df.reset_index()
msg = (
r"invalid on specified as A, must be a column "
"\\(of DataFrame\\), an Index or None"
)
with pytest.raises(ValueError, match=msg):
df.rolling("2s", on="A").sum()
def test_frame_on(self):
df = DataFrame(
{"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
)
df["A"] = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
        # we are simulating using 'on'
expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)
result = df.rolling("2s", on="A").B.sum()
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
        # note that the expected is produced by setting, computing, and
        # resetting, so the columns need to be switched compared
        # to the actual result, where they are ordered as in the
        # original
expected = (
df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
)
result = df.rolling("2s", on="A")[["B"]].sum()
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame(
{
"A": [0, 1, 2, 3, 4],
"B": [0, 1, 2, np.nan, 4],
"C": Index(
[
| Timestamp("20130101 09:00:00") | pandas.Timestamp |
"""Test functions in owid.datautils.dataframes module.
"""
import numpy as np
import pandas as pd
from pytest import warns
from typing import Any, Dict
from owid.datautils import dataframes
class TestCompareDataFrames:
def test_with_large_absolute_tolerance_all_equal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, True]}))
def test_with_large_absolute_tolerance_all_unequal(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3]}),
absolute_tolerance=0.9,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [False, False]}))
def test_with_large_absolute_tolerance_mixed(self):
assert dataframes.compare(
df1=pd.DataFrame({"col_01": [1, 2]}),
df2=pd.DataFrame({"col_01": [2, 3.1]}),
absolute_tolerance=1,
relative_tolerance=1e-8,
).equals(pd.DataFrame({"col_01": [True, False]}))
def test_with_large_relative_tolerance_all_equal(self):
assert dataframes.compare(
df1= | pd.DataFrame({"col_01": [1, 2]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 11:27:05 2019
@author: <NAME>
"""
""" Quick Start
In order to use this program, you will need to do these things:
* Specify a value for the variable 'server' to indicate whether local files
  (for example, in debugging mode) or file paths on a remote server will be
  used as input.
* Specify appropriate values for the variables 'path1' and 'files1'
  for input file paths.
* Determine whether the variable 'files1Short' is desired. This was based
  on the author's file-naming conventions and will not be appropriate in
  all circumstances. Other parts of the program will need to be revised
  if this variable is not used for a shorter graph title.
* Ensure that input data is in the format indicated in comments below.
"""
"""
This Python 3 code performs the following tasks:
* Performs statistical tests on hit rate data:
- Tests whether the distribution of hits across the four categories is
different from a random allocation of hits across categories in proportion
to the number of articles in each category in a statistically significant way.
- Tests whether categorizing articles along the dimensions of novelty and
conventionality, individually, has explanatory power
- Test whether the number of hits in each category differs in a statistically
significant way from a ransom distribution of hit articles among the
categories by binning the remaining three categories together. This mitigates
issues that arise in some circumstances when an insuffiicent expeted number
of hits prevents a valid analysis in the case of the test outlined in the
first bullet point above.
* Performs the Spearman Rank Correlation Test between citation_count and all
other data columns
* Outputs JSON files to be used by a subsequent program to graph the data
* Outputs data in a format amenable to inclusion in LaTex file tables
"""
""" This program requires all of the Python packages below, which
are all included with the Anaconda distribution of Python """
import pandas as pd
import numpy as np
from scipy.stats import spearmanr
from scipy.stats import chisquare
from scipy.stats import binom
import json
import re
server = True
""" This function formats data for output in LaTex format to a specified
number of decimal places """
def formFloat (num,places):
fStr = '{:.'+str(places)+'f}'
num = float(int(float(fStr.format(num))*10**places+0.5))/10**places
if num <= 0.025:# or num >= 0.975:
return '\\textbf{'+fStr.format(num)+'}'
elif num <= 0.05:# or num >= .95:
return '\\textit{'+fStr.format(num)+'}'
else:
return fStr.format(num)
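# Illustrative values implied by the logic above: formFloat(0.0123, 3) gives a
# bolded LaTeX value ('\\textbf{0.012}', since the rounded value is <= 0.025),
# while formFloat(0.2, 2) gives the plain string '0.20'.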
""" This function formats data for output in LaTex format
It also includes code for a dagger symbol where the number of expected
hits was less than the minimum required for a valid statistical test """
def formFloatDagger (num,places):
fStr = '{:.'+str(places)+'f}'
num[0] = float(int(float(fStr.format(num[0]))*10**places+0.5))/10**places
if num[0] <= 0.025: # or num[0] >= 0.975:
if num[1] >= 5.0:
return '\\textbf{'+fStr.format(num[0])+'}'
else:
return '$\dagger$ \\textbf{'+fStr.format(num[0])+'} '
elif num[0] <= 0.05: # or num[0] >= .95:
if num[1] >= 5.0:
return '\\textit{'+fStr.format(num[0])+'}'
else:
return '$\dagger$ \\textit{'+fStr.format(num[0])+'} '
else:
return fStr.format(num[0])
""" This function formats data for output in LaTex format
It also permits output of the string 'NA' when a numberical value
is not passed to the function. """
def formFloatDaggerNA (num,places):
try:
fStr = '{:.'+str(places)+'f}'
num = float(int(float(fStr.format(num))*10**places+0.5))/10**places
if num <= 0.025: # or num >= 0.975:
return '\\textbf{'+fStr.format(num)+'}'
elif num <= 0.05: # or num >= .95:
return '\\textit{'+fStr.format(num)+'}'
else:
return fStr.format(num)
except:
return str(num)
""" Calculates hit rate except returns 0.0 when the total number of articles
in a category is zero to avoid dividing by zero """
def percent(row):
if row['total']> 0:
return row['hit'] / row['total']
else:
return 0.0
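# Intended usage (sketch): applied row-wise to a counts table with 'hit' and
# 'total' columns, e.g. counts_df.apply(percent, axis=1); 'counts_df' is a
# hypothetical name, the actual table is presumably built later in the program.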
""" This if-else block permits an alternate, local file to be input during debugging
server is a Boolean variable that, if True, indicates that the path and files in
if block are input and, otherwise, the path and files in the else block are input. """
""" Input file format """
""" Input requires the first line to have field names and subsequent comma-delimited text files
Data dictionary:
* source_id: a unique identifier for an article. We used IDs from the Web of
Science under license from Clarivate Analytics, which we cannot disclose.
These can be string values (do not enclose in quotes in data file if this is
the case).
* med: the median z-score of all the citations in the source article
* ten: the 10th percentile z-score (left tail) of the citation z-scores
* one: the 1st percentile z-score (left tail) of the citation z-scores
    * citation_count: the number of times the source article was cited
Example:
source_id,med,ten,one,citation_count
0,4.37535958641463,-0.368176148773802,-1.84767079802106,1
1,8.94701613716861,0.695385836097657,-1.0789085501296,6
2,17.9740470024929,-8.85622661474813,-10.3102229485467,14
"""
""" The Boolean variable 'server' controls which paths and files are input """
if server: # settings for production runs on server
path1 = '/path_to_remote_data_folder/'
files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv','data_1995/imm95_pubwise_zsc_med.csv','data_1995/metab95_pubwise_zsc_med.csv', 'data_1995/ap95_pubwise_zsc_med.csv', \
'data_1985/d1000_85_pubwise_zsc_med.csv','data_1985/imm85_pubwise_zsc_med.csv','data_1985/metab85_pubwise_zsc_med.csv', 'data_1985/ap85_pubwise_zsc_med.csv', \
'data_2005/d1000_2005_pubwise_zsc_med.csv', 'data_2005/imm2005_pubwise_zsc_med.csv','data_2005/metab2005_pubwise_zsc_med.csv', 'data_2005/ap2005_pubwise_zsc_med.csv']
else: # settings for local debugging
path1 = '/path_to_local_data_folder/'
files1 = ['data_1995/d1000_95_pubwise_zsc_med.csv']
""" This statement extracts the filename from the path for succinct identification of the filename """
""" This statement may not be appropriate for alternate file naming conventions """
files1Short = [x.split('/')[-1] for x in files1]
""" Extract year and data set topic from filename """
years = [re.search('data_\d{4}',x).group(0).replace('data_','') for x in files1]
datasets = [re.sub('\d+','',re.search('/\w+_',x).group(0).split('_')[0].replace('/','')) for x in files1]
transDataset = {'imm':'Immunology', 'd':'Web of Science', 'metab':'Metabolism', 'ap':'Applied Physics'}
""" These lists are are used for coding results in Latex files, output in JSON files,
and create pandas DataFrames within the program """
cols = ['med','ten','one']
catConven = ['LC','HC']
catNovel = ['HN','LN']
catHit = ['hit','non-hit']
countRows = ['LNHC','HNLC','LNLC','HNHC']
countCols = catHit
countRowsBin = ['LN','HN','LC','HC']
""" Iterate through the inputted fiels """
for i in range(len(files1)):
""" These statements create empty dictionaries for storing results"""
binomRes = {} # dictionary for results of 2-category tests
Fig2Res = {} # dictionary for results of 4-category tests for data in the form of Uzzi's Fig. 2
Fig2IndRes = {} # dictionary for results of testing each of the 4 categories in Uzzi's Fig. 2 individually
graphDic = {} # Dictionary to store visualization data
df = | pd.read_csv(path1+files1[i]) | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
functions that disambiguate names using an external file.
Inputs are:
- a tab delimited csv with columns "Unique Names" (= found name, as in the
text) and "NameCopy" (the right name)
- output from 01_parse_xml.py
The "disambiguate_names" function will use the disambiguation file to change
the names in the output from parse_xml.
If you want to save the log file, then rename the 'parse.log' file since it is
overwritten every time
'''
import json
import logging
import pandas as pd
import numpy as np
import re
import bmt_parser.config as cf
import bmt_parser.name_corrections as corr
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.WARNING)
logger.addHandler(stream_handler)
file_handler = logging.FileHandler('parse.log')
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
def prepare_disambiguation_file(path, names_in_data, name_replacements=None):
'''
path: path to disamb file
names_in_data: Set of unique names present in the dataset
name_replacements: dict where key=resolved name, and value=replacement
'''
table = pd.read_csv(path, sep=cf.CSV_SEP)
table = table.loc[:, ['Unique Names', 'NameCopy']]
table.rename(columns={'Unique Names': 'found',
'NameCopy': 'resolved'},
inplace=True)
# removing NaN values in found
table = table.loc[table['found'].notnull(), :]
# when no resolved name, replace with found
idx = table['resolved'].isnull()
empty_names = table.loc[idx, 'found']
table.loc[idx, 'resolved'] = empty_names
# corrections
table = add_missing_names(table, names_in_data)
if name_replacements:
counter = 0
for key, value in name_replacements.items():
table['resolved'][table['resolved'] == key] = value
counter += 1
logger.warning('{} names were replaced with values in '
'the provided name_replacement json'.format(counter))
table = fix_names(table)
table = fix_repeated_resolved(table)
table = resolve_initials(table)
return table
def add_missing_names(table, names_in_data):
'''adds names that are in the dataset but not in the disambiguation file
'''
names_in_disamb = set(table.found)
# names in disamb but not in data.
# these might indicate an error in parsing
not_found = names_in_disamb.difference(names_in_data)
percent_format = "{:.1%}".format(len(not_found) / len(names_in_disamb))
logger.warning('{} names in disambiguation ({}) not present in the dataset'
.format(len(not_found), percent_format))
logger.info('\n'.join(sorted(not_found)))
# reverse: names that are present in the dataset but not in the disamb file
not_found = names_in_data.difference(names_in_disamb)
percent_format = "{:.1%}".format(len(not_found) / len(names_in_data))
logger.warning('{} names ({}) not found in the disambiguation file'
.format(len(not_found), percent_format))
logger.info('\n'.join(sorted(not_found)))
# adding missing names to table
missing_names = pd.DataFrame([[name, name] for name in not_found],
columns=['found', 'resolved'])
table = table.append(missing_names, ignore_index=True)
return table
def fix_names(table):
# stripping whitespace
table['resolved'] = pd.Series([s.strip() for s in table['resolved']])
table['found'] = pd.Series([s.strip() for s in table['found']])
# removing strange characters (string terminators, etc)
table['resolved'] = table['resolved'].apply(corr.remove_strange_chars)
table['found'] = table['found'].apply(corr.remove_strange_chars)
# removing years from resolved names
table['resolved_original'] = table['resolved']
table['resolved'] = table['resolved'].apply(corr.strip_year)
# transforming into "Name Surname" for resolved names
table['resolved'] = table['resolved'].apply(corr.order_names)
# getting title (Dr., Prof.) and storing into the table
titles = [corr.get_title_and_rest(name) for name in table['found']]
names = [t[1] for t in titles]
titles = [t[0] for t in titles]
table['titles'] = pd.Series(titles)
# turning all caps into proper form
table['resolved'] = table['resolved'].apply(corr.capitalize)
table['found'] = table['found'].apply(corr.capitalize)
# getting initials from name
initials = [corr.fix_initials(name) if corr.are_initials(name)
else corr.get_initials(name) for name in names]
table['initials_found'] = pd.Series(initials)
table['initials_resolved'] = table['resolved'].apply(corr.get_initials)
table['found_are_initials'] = pd.Series([corr.are_initials(name)
for name in names])
# getting the surname
splits = [name.split(' ') for name in table['found']]
table['surname_found'] = pd.Series([s[len(s) - 1] for s in splits])
return table
def resolve_initials(table):
# getting resolved names initials
tgt = table.loc[~table['found_are_initials'],
['initials_resolved', 'resolved']].itertuples()
tgt = [(t[1], t[2]) for t in tgt]
tgt = list(set(tgt))
counts = {}
for i in tgt:
initials = i[0].upper() # disregarding lowercase letters
try:
counts[initials]['count'] += 1
except KeyError:
counts[initials] = {}
counts[initials]['name'] = i[1]
counts[initials]['count'] = 1
replacements = {c[0]: c[1]['name'] for c in counts.items()
if c[1]['count'] == 1}
# initials that do not have a full name
indices = table.loc[table['found_are_initials'], :].index
for idx in indices:
row = table.iloc[idx, ]
try:
initials = row['initials_found']
logger.warning('replacements found for "{}": "{}"'.format(
initials, replacements[initials]))
except KeyError:
logger.warning('no replacement found for initials "{}"'
.format(initials))
return table
def fix_repeated_resolved(table):
# searching for cases when resolved name is repeated as found name
copy = table.loc[table['resolved'] != table['found'], :]
names = list(copy['found'])
cases = [_ for _ in copy['resolved'] if _ in names]
# replacing those names
replacements = {'resolved': {}}
for resolved in cases:
idx_found = table['found'] == resolved
# should have only one True
if sum(idx_found) != 1:
raise ValueError('duplicated found names!')
idx_found = np.nonzero(idx_found)[0][0]
new_name = table.get_value(index=idx_found, col='resolved')
replacements['resolved'][resolved] = new_name
if replacements['resolved']:
table = table.replace(to_replace=replacements)
return table
def disambiguate_names(original_data, disamb_data):
disamb_data = {row[1]: row[2] for row
in disamb_data.loc[:, ['found', 'resolved']].itertuples()}
for i, row in original_data.iterrows():
if row.authors is not np.nan:
multiple = re.search(cf.AUTHOR_SEP, row.authors)
try:
if multiple:
authors = [corr.capitalize(a.strip()) for a
in row.authors.split(cf.AUTHOR_SEP)]
authors = [disamb_data[author] for author in authors]
original_data.set_value(
i, 'authors', cf.AUTHOR_SEP.join(authors))
else:
author = corr.capitalize(row.authors.strip())
original_data.set_value(i, 'authors',
disamb_data[row.authors])
except KeyError:
logger.warning('author(s) "{}" do not have disambiguations'
.format(row.authors))
return original_data
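# Sketch of the overall flow (file names here are hypothetical; main() below
# wires the same steps together from real paths):
#   original = pd.read_csv('parsed_articles.csv', delimiter=cf.CSV_SEP)
#   names_in_data = set()
#   for authors in original['authors'].dropna():
#       names_in_data.update(a.strip() for a in authors.split(cf.AUTHOR_SEP))
#   disamb = prepare_disambiguation_file('disambiguation.csv', names_in_data)
#   resolved = disambiguate_names(original, disamb)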
def main(disamb_path, original_path, name_replacements_path,
disamb_write_path=None):
original_data = | pd.read_csv(original_path, delimiter=cf.CSV_SEP) | pandas.read_csv |
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.Index(["self", "other"])
expected = pd.DataFrame(
[["a", "x"], ["c", "z"]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
expected = pd.Series(["a", "x", "c", "z"], index=indices)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
s1 = pd.Series(["a", "b", "c"])
s2 = pd.Series(["x", "b", "z"])
result = s1.compare(s2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = | pd.Index(["self", "other"]) | pandas.Index |
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
        # test that the cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
self.tsframe.ix[:5, 'A'] = nan
self.tsframe.ix[-5:, 'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.ix[-10:, 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
| pd.Timestamp('2013-01-02') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# # How can we predict not just the hourly PM2.5 concentration at the site of one EPA sensor, but predict the hourly PM2.5 concentration anywhere?
#
# Here, you build a new model for any given hour on any given day. This will leverage readings across all ~120 EPA sensors, as well as weather data, traffic data, purpleair data, and maybe beacon data to create a model that predicts the PM2.5 value at that location.
# In[1]:
import json
import csv
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import geopandas as gpd
import shapely
from shapely.geometry import Point, MultiPoint, Polygon, MultiPolygon
from shapely.affinity import scale
import matplotlib.pyplot as plt
import glob
import os
import datetime
from datetime import timezone
import zipfile
import pickle
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import unittest
import keras
import numpy as np
import pandas as pd
import sklearn
from sklearn import preprocessing
import xrdos
class test_xrdos(unittest.TestCase):
def test_split(self):
data = {'column1': [2, 2, 3], 'column2': [1, 3, 5]}
df = pd.DataFrame(data)
one, two = xrdos.split(df, 1)
assert one[0] == 1
assert two[0] == 2
return
def test_scaling(self):
data = {'column1': [2.0, 2.0, 3.0], 'column2': [1.0, 3.0, 5.0]}
df = pd.DataFrame(data)
df, scaler = xrdos.scaling(df)
assert df.loc[0].iloc[0] == 0
assert df.loc[2].iloc[0] == 1
return
def test_linear_regression(self):
regr = xrdos.linear_regression()
x = np.array([0.5, 1.0, 2.0])
y = np.array([0.5, 1.0, 2.0])
regr.fit(x.reshape(-1, 1), y.reshape(-1, 1))
p = np.array([0.5, 1.0, 2.0]).reshape(-1, 1)
prediction = regr.predict(p)
for i in range(len(prediction)):
assert int(prediction[i]) == int(x[i])
assert isinstance(p, np.ndarray)
return
def test_neural_network(self):
assert isinstance(
xrdos.neural_network(1),
keras.wrappers.scikit_learn.KerasRegressor)
return
def test_split_and_scale(self):
data = {'column1': [2, 2, 3], 'column2': [1, 3, 5]}
df = pd.DataFrame(data)
x, y, z = xrdos.split_and_scale(df, 1, (False, 1, False))
assert x[0] == 1
assert y.iloc[2].iloc[0] == 1
return
def test_polynomialize(self):
data = {'column1': [2, 2, 3], 'column2': [1, 3, 5]}
df = | pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import datetime as dt, logging, numpy, pandas as pd, pyarrow as pa, unittest
import graphistry, graphistry.plotter
from common import NoAuthTestCase
logger = logging.getLogger(__name__)
nid = graphistry.plotter.Plotter._defaultNodeId
triangleNodesDict = {
'id': ['a', 'b', 'c'],
'a1': [1, 2, 3],
'a2': ['red', 'blue', 'green'],
'🙈': ['æski ēˈmōjē', '😋', 's']
}
triangleNodes = pd.DataFrame(triangleNodesDict)
hyper_df = | pd.DataFrame({'aa': [0, 1, 2], 'bb': ['a', 'b', 'c'], 'cc': ['b', 0, 1]}) | pandas.DataFrame |
"""
This code is copied from Philippjfr's notebook:
https://anaconda.org/philippjfr/sankey/notebook
"""
from functools import cmp_to_key
import holoviews as hv
import numpy as np
import pandas as pd
import param
from bokeh.models import Patches
from holoviews import Operation
from holoviews.core.util import basestring, max_range
from holoviews.element.graphs import Graph, Nodes, EdgePaths, Dataset, redim_graph
from holoviews.plotting.bokeh import GraphPlot
class Sankey(Graph):
group = param.String(default='Sankey', constant=True)
def __init__(self, data, kdims=None, vdims=None, compute=True, **params):
if isinstance(data, tuple):
data = data + (None,) * (3 - len(data))
edges, nodes, edgepaths = data
else:
edges, nodes, edgepaths = data, None, None
if nodes is not None:
if not isinstance(nodes, Dataset):
if nodes.ndims == 3:
nodes = Nodes(nodes)
else:
nodes = Dataset(nodes)
nodes = nodes.clone(kdims=nodes.kdims[0],
vdims=nodes.kdims[1:])
node_info = nodes
super(Graph, self).__init__(edges, kdims=kdims, vdims=vdims, **params)
if compute:
self._nodes = nodes
chord = layout_sankey(self)
self._nodes = chord.nodes
self._edgepaths = chord.edgepaths
self._sankey = chord._sankey
else:
if not isinstance(nodes, Nodes):
raise TypeError("Expected Nodes object in data, found %s."
% type(nodes))
self._nodes = nodes
if not isinstance(edgepaths, EdgePaths):
raise TypeError("Expected EdgePaths object in data, found %s."
% type(edgepaths))
self._edgepaths = edgepaths
self._sankey = None
self._validate()
self.redim = redim_graph(self, mode='dataset')
class SankeyPlot(GraphPlot):
label_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
filled = True
_style_groups = dict(GraphPlot._style_groups, quad='nodes', text='label')
_draw_order = ['patches', 'multi_line', 'text', 'quad']
style_opts = GraphPlot.style_opts + ['edge_fill_alpha', 'nodes_line_color', 'label_text_font_size']
def _init_glyphs(self, plot, element, ranges, source):
ret = super(SankeyPlot, self)._init_glyphs(plot, element, ranges, source)
renderer = plot.renderers.pop(plot.renderers.index(self.handles['glyph_renderer']))
plot.renderers = [renderer] + plot.renderers
return ret
def get_data(self, element, ranges, style):
data, mapping, style = super(SankeyPlot, self).get_data(element, ranges, style)
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = data['scatter_1']
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['quad_1'] = quad_data
quad_mapping['fill_color'] = mapping['scatter_1']['node_fill_color']
mapping['quad_1'] = quad_mapping
style['nodes_line_color'] = 'black'
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Chord not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, mapping, style
if element.vdims:
edges = Dataset(element)[element[element.vdims[0].name] > 0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
else:
nodes = element
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
ys = nodes.dimension_values(1)
nodes = element._sankey['nodes']
offset = (nodes[0]['x1'] - nodes[0]['x0']) / 4.
xs = np.array([node['x1'] for node in nodes])
data['text_1'] = dict(x=xs + offset, y=ys, text=[str(l) for l in labels])
mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align='left')
return data, mapping, style
def get_extents(self, element, ranges):
"""
A Chord plot is always drawn on a unit circle.
"""
xdim, ydim = element.nodes.kdims[:2]
xpad = .05 if self.label_index is None else 0.25
x0, x1 = ranges[xdim.name]
y0, y1 = ranges[ydim.name]
xdiff = (x1 - x0)
ydiff = (y1 - y0)
x0, x1 = max_range([xdim.range, (x0 - (0.05 * xdiff), x1 + xpad * xdiff)])
y0, y1 = max_range([ydim.range, (y0 - (0.05 * ydiff), y1 + (0.05 * ydiff))])
return (x0, y0, x1, y1)
def _postprocess_hover(self, renderer, source):
if self.inspection_policy == 'edges':
if not isinstance(renderer.glyph, Patches):
return
else:
if isinstance(renderer.glyph, Patches):
return
super(SankeyPlot, self)._postprocess_hover(renderer, source)
def weightedSource(link):
return nodeCenter(link['source']) * link['value']
def weightedTarget(link):
return nodeCenter(link['target']) * link['value']
def nodeCenter(node):
return (node['y0'] + node['y1']) / 2
def ascendingBreadth(a, b):
return int(a['y0'] - b['y0'])
def ascendingSourceBreadth(a, b):
return ascendingBreadth(a['source'], b['source']) | a['index'] - b['index']
def ascendingTargetBreadth(a, b):
return ascendingBreadth(a['target'], b['target']) | a['index'] - b['index']
def quadratic_bezier(start, end, c0=(0, 0), c1=(0, 0), steps=25):
"""
    Compute a cubic bezier spline given start and end coordinates and
    two control points, evaluated at `steps` points.
"""
steps = np.linspace(0, 1, steps)
sx, sy = start
ex, ey = end
cx0, cy0 = c0
cx1, cy1 = c1
xs = ((1 - steps) ** 3 * sx + 3 * ((1 - steps) ** 2) * steps * cx0 +
3 * (1 - steps) * steps ** 2 * cx1 + steps ** 3 * ex)
ys = ((1 - steps) ** 3 * sy + 3 * ((1 - steps) ** 2) * steps * cy0 +
3 * (1 - steps) * steps ** 2 * cy1 + steps ** 3 * ey)
return np.column_stack([xs, ys])
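# Minimal illustration (kept commented out so importing this module has no side
# effects): a spline from (0, 0) to (1, 0) pulled upward by two control points,
# returned as a (steps, 2) array of x/y coordinates.
# pts = quadratic_bezier((0, 0), (1, 0), c0=(0.25, 0.5), c1=(0.75, 0.5), steps=5)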
class layout_sankey(Operation):
"""
Computes a Sankey diagram from a Graph element.
Adapted from d3-sankey under BSD-3 license.
"""
bounds = param.NumericTuple(default=(0, 0, 1000, 500))
node_width = param.Number(default=15)
node_padding = param.Integer(default=10)
iterations = param.Integer(32)
def _process(self, element, key=None):
graph = {'nodes': [], 'links': []}
self.computeNodeLinks(element, graph)
self.computeNodeValues(graph)
self.computeNodeDepths(graph)
self.computeNodeBreadths(graph)
self.computeLinkBreadths(graph)
paths = []
for link in graph['links']:
source, target = link['source'], link['target']
x0, y0 = source['x1'], link['y0']
x1, y1 = target['x0'], link['y1']
start = np.array([(x0, link['width'] + y0),
(x0, y0)])
src = (x0, y0)
ctr1 = ((x0 + x1) / 2., y0)
ctr2 = ((x0 + x1) / 2., y1)
tgt = (x1, y1)
bottom = quadratic_bezier(src, tgt, ctr1, ctr2)
mid = np.array([(x1, y1),
(x1, y1 + link['width'])])
xmid = (x0 + x1) / 2.
y0 = y0 + link['width']
y1 = y1 + link['width']
src = (x1, y1)
ctr1 = (xmid, y1)
ctr2 = (xmid, y0)
tgt = (x0, y0)
top = quadratic_bezier(src, tgt, ctr1, ctr2)
spline = np.concatenate([start, bottom, mid, top])
paths.append(spline)
node_data = []
for node in graph['nodes']:
node_data.append((np.mean([node['x0'], node['x1']]),
np.mean([node['y0'], node['y1']]),
node['index']) + tuple(node['values']))
nodes = Nodes(node_data, vdims=element.nodes.vdims)
edges = EdgePaths(paths)
sankey = Sankey((element.data, nodes, edges), compute=False)
sankey._sankey = graph
return sankey
def computeNodeLinks(self, element, graph):
"""
Populate the sourceLinks and targetLinks for each node.
Also, if the source and target are not objects, assume they are indices.
"""
index = element.nodes.kdims[-1]
node_map = {}
values = element.nodes.array(element.nodes.vdims)
for node, vals in zip(element.nodes.dimension_values(index), values):
node = {'index': node, 'sourceLinks': [], 'targetLinks': [], 'values': vals}
graph['nodes'].append(node)
node_map[node['index']] = node
links = [element.dimension_values(d) for d in element.dimensions()[:3]]
for i, (src, tgt, value) in enumerate(zip(*links)):
source, target = node_map[src], node_map[tgt]
link = dict(index=i, source=source, target=target, value=value)
graph['links'].append(link)
source['sourceLinks'].append(link)
target['targetLinks'].append(link)
def computeNodeValues(self, graph):
"""
Compute the value (size) of each node by summing the associated links.
"""
for node in graph['nodes']:
source_val = np.sum([l['value'] for l in node['sourceLinks']])
target_val = np.sum([l['value'] for l in node['targetLinks']])
node['value'] = max([source_val, target_val])
def computeNodeDepths(self, graph):
"""
Iteratively assign the depth (x-position) for each node.
Nodes are assigned the maximum depth of incoming neighbors plus one;
nodes with no incoming links are assigned depth zero, while
nodes with no outgoing links are assigned the maximum depth.
"""
nodes = graph['nodes']
depth = 0
while nodes:
next_nodes = []
for node in nodes:
node['depth'] = depth
for link in node['sourceLinks']:
if link['target'] not in next_nodes:
next_nodes.append(link['target'])
nodes = next_nodes
depth += 1
nodes = graph['nodes']
depth = 0
while nodes:
next_nodes = []
for node in nodes:
node['height'] = depth
for link in node['targetLinks']:
if link['source'] not in next_nodes:
next_nodes.append(link['source'])
nodes = next_nodes
depth += 1
x0, _, x1, _ = self.p.bounds
dx = self.p.node_width
kx = (x1 - x0 - dx) / (depth - 1)
for node in graph['nodes']:
d = node['depth'] if node['sourceLinks'] else depth - 1
node['x0'] = x0 + max([0, min([depth - 1, np.floor(d)]) * kx])
node['x1'] = node['x0'] + dx
def computeNodeBreadths(self, graph):
node_map = hv.OrderedDict()
for n in graph['nodes']:
if n['x0'] not in node_map:
node_map[n['x0']] = []
node_map[n['x0']].append(n)
_, y0, _, y1 = self.p.bounds
py = self.p.node_padding
def initializeNodeBreadth():
kys = []
for nodes in node_map.values():
nsum = np.sum([node['value'] for node in nodes])
ky = (y1 - y0 - (len(nodes) - 1) * py) / nsum
kys.append(ky)
ky = np.min(kys)
for nodes in node_map.values():
for i, node in enumerate(nodes):
node['y0'] = i
node['y1'] = i + node['value'] * ky
for link in graph['links']:
link['width'] = link['value'] * ky
def relaxLeftToRight(alpha):
for nodes in node_map.values():
for node in nodes:
if not node['targetLinks']:
continue
weighted = sum([weightedSource(l) for l in node['targetLinks']])
tsum = sum([l['value'] for l in node['targetLinks']])
center = nodeCenter(node)
dy = (weighted / tsum - center) * alpha
node['y0'] += dy
node['y1'] += dy
def relaxRightToLeft(alpha):
for nodes in list(node_map.values())[::-1]:
for node in nodes:
if not node['sourceLinks']:
continue
weighted = sum([weightedTarget(l) for l in node['sourceLinks']])
tsum = sum([l['value'] for l in node['sourceLinks']])
center = nodeCenter(node)
dy = (weighted / tsum - center) * alpha
node['y0'] += dy
node['y1'] += dy
def resolveCollisions():
for nodes in node_map.values():
y = y0
n = len(nodes)
nodes.sort(key=cmp_to_key(ascendingBreadth))
for node in nodes:
dy = y - node['y0']
if dy > 0:
node['y0'] += dy
node['y1'] += dy
y = node['y1'] + py
dy = y - py - y1
if dy > 0:
node['y0'] -= dy
node['y1'] -= dy
y = node['y0']
for node in nodes[:-1][::-1]:
dy = node['y1'] + py - y;
if dy > 0:
node['y0'] -= dy
node['y1'] -= dy
y = node['y0']
initializeNodeBreadth()
resolveCollisions()
alpha = 1
for _ in range(self.p.iterations):
alpha = alpha * 0.99
relaxRightToLeft(alpha)
resolveCollisions()
relaxLeftToRight(alpha)
resolveCollisions()
def computeLinkBreadths(self, graph):
for node in graph['nodes']:
node['sourceLinks'].sort(key=cmp_to_key(ascendingTargetBreadth))
node['targetLinks'].sort(key=cmp_to_key(ascendingSourceBreadth))
for node in graph['nodes']:
y0 = y1 = node['y0']
for link in node['sourceLinks']:
link['y0'] = y0
y0 += link['width']
for link in node['targetLinks']:
link['y1'] = y1
y1 += link['width']
# Register Sankey with holoviews
hv.Store.register({Sankey: SankeyPlot}, 'bokeh')
# Convenience function for adding links
def make_links(df, groups):
"""
Makes links given a set of groups and a dataframe
:param pd.DataFrame df: Input dataframe containing groups
:param list groups: List of groups to link
:return: DataFrame of links
:rtype: pd.DataFrame
"""
links = []
    for i in range(len(groups) - 1):
links.extend(_add_links(df.groupby(groups[i])[groups[i + 1]].value_counts().iteritems()))
return | pd.DataFrame(links) | pandas.DataFrame |
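# Example (hypothetical dataframe whose categorical columns are chained left to
# right into Sankey links). Note that make_links relies on an `_add_links`
# helper defined in the original notebook but not reproduced above.
#   df = pd.DataFrame({'a': ['x', 'x', 'y'], 'b': ['u', 'v', 'u']})
#   links = make_links(df, ['a', 'b'])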
import numpy
import pandas
import sklearn
import seaborn
import matplotlib.pyplot as plot
from sklearn import datasets
from sklearn import model_selection
from sklearn import pipeline
from sklearn import preprocessing
from sklearn import linear_model
from sklearn import ensemble
from sklearn import metrics
from sklearn import inspection
# load iris into data frame
iris = datasets.load_iris()
df = pandas.DataFrame(iris.data, columns = iris.feature_names)
df['species'] = pandas.Categorical.from_codes(iris.target, iris.target_names)
# show class distribution
print("Number of each target class is ")
print(df.groupby('species').size())
# output features
print(df.head())
# plot distributions
seaborn.pairplot(data = df, hue = 'species').savefig('plots/iris/pairs.pdf')
# create feature and target dataframes
features = df.drop(['species'], axis = 1)
target = | pandas.Series(iris.target) | pandas.Series |
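# A minimal sketch of one possible continuation using the modules imported
# above (train/test split plus a scaled logistic-regression pipeline); the
# original script may well proceed differently.
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    features, target, test_size=0.3, random_state=0, stratify=target)
model = pipeline.make_pipeline(
    preprocessing.StandardScaler(),
    linear_model.LogisticRegression(max_iter=1000))
model.fit(x_train, y_train)
print('test accuracy:', metrics.accuracy_score(y_test, model.predict(x_test)))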
# -*- coding: utf-8 -*-
"""
Notes and attempts on Chapter 03 (Think Stats 2, <NAME>)
Self-study on statistics using Python
@author: Github: @rafaelmm82
"""
import thinkstats2
import thinkplot
import nsfg
import math
import first
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
pmf = thinkstats2.Pmf([1, 2, 2, 3, 5])
pmf
pmf.Prob(2)
pmf[2]
pmf.Incr(2, 0.2)
pmf.Prob(2)
pmf.Mult(2, 0.5)
pmf
pmf.Total()
pmf.Normalize()
pmf.Total()
pmf
live, firsts, others = first.MakeFrames()
first_pmf = thinkstats2.Pmf(firsts.prglngth, label='first')
other_pmf = thinkstats2.Pmf(others.prglngth, label='other')
width = 0.45
thinkplot.PrePlot(2, cols=2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='weeks',
ylabel='probability',
axis=[27, 46, 0, 0.6])
thinkplot.PrePlot(2)
thinkplot.SubPlot(2)
thinkplot.Pmfs([first_pmf, other_pmf])
thinkplot.Show(xlabel='weeks',
axis=[27, 46, 0, 0.6])
weeks = range(35, 46)
diffs = []
for week in weeks:
p1 = first_pmf.Prob(week)
p2 = other_pmf.Prob(week)
diff = 100 * (p1 - p2)
diffs.append(diff)
thinkplot.Bar(weeks, diffs)
d = {7: 8, 12: 8, 17: 14, 22: 4,
     27: 6, 32: 12, 37: 8, 42: 3, 47: 2}
pmf = thinkstats2.Pmf(d, label='actual')
print('mean', pmf.Mean())
def BiasPmf(pmf, label=''):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, x)
new_pmf.Normalize()
return new_pmf
biased_pmf = BiasPmf(pmf, label='observed')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, biased_pmf])
thinkplot.Show(xlabel='class size', ylabel='PMF')
def UnbiasPmf(pmf, label):
new_pmf = pmf.Copy(label=label)
for x, p in pmf.Items():
new_pmf.Mult(x, 1.0/x)
new_pmf.Normalize()
return new_pmf
unbiased_pmf = UnbiasPmf(pmf, label='better_values')
thinkplot.PrePlot(2)
thinkplot.Pmfs([pmf, unbiased_pmf])
thinkplot.Show(xlabel='class size', ylabel='PMF')
array = np.random.randn(4, 2)
df = pd.DataFrame(array)
import pandas as pd
from argparse import ArgumentParser
def read_metadata(f):
print(f"reading metadata file {f}...")
df = pd.read_csv(f,sep='\t',low_memory=False)
df['fulldate'] = df['date'].apply(lambda x: "XX" not in str(x))
df = df.query("fulldate == True").copy()
    df['date'] = pd.to_datetime(df['date'])
import os
# Reduce CPU load. This must be done BEFORE importing numpy and some other libraries.
os.environ['MKL_NUM_THREADS'] = '2'
os.environ['OMP_NUM_THREADS'] = '2'
os.environ['NUMEXPR_NUM_THREADS'] = '2'
import gc
import math
import copy
import json
import numpy as np
import pandas as pd
import torch as th
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from torch.nn.utils.rnn import pad_sequence
from typing import Optional, Sequence, List, Tuple, Union, Dict
import requests
from tqdm import tqdm
import re
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss
# Setup logging
import logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
level=logging.DEBUG,
)
log = logging.getLogger('agro')
RANDOM_SEED = 2021
"""
# Общая идея
Эта задача по смыслу сходна с задачей Sentiment Analysis.
То есть, когда тексту в соответствие ставится один или несколько классов,
например: (положительный, негативный, нейтральный)
В данном случае: несколько классов может быть присвоено одновременно (MultiLabel Classification)
Я решил, что для этой цели подойдёт архитектура Transformers.
Точнее, её первая половина: TransformerEncoder.
На вход вместо слов подаётся последовательность эмбедингов (Embeddings).
То есть, каждому слову ставится в соответствие точка в N-мерном пространстве.
Обычно N: от 100 до 300.
Для каждого `embedding` добавляем информацию о положении слова в тексте: `PositionalEncoding`.
Далее несколько слоёв TransformerEncoder обрабатывают всю последовательность сразу,
усиляя одни блоки и ослабляя другие, выделяя, таким образом, важную информацию.
Затем обработанная последовательность сравнивается с некими целевыми эмбедингами (Target Embeddings),
которые описывают то или иное заболевание.
При сравнении вся последовательность сливается в некий единый эмбединг, по одному для каждого класса.
Финальный этап, получившийся набор эмбеддингов (фиксированного размера) пропускается через Linear слой,
чтобы создать вероятности для каждого заболевания.
"""
"""
# Словарь Embeddings для русского языка
Для работы нам потребуются готовые `embeddings` для русских слов.
Есть некоторые доступные для скачивания словари на
[RusVectores](https://rusvectores.org/ru/)
Но размер словарей в них: от 150 до 300 тысяч слов, что довольно мало.
Также, не совсем понятны условия их лицензии.
Есть проект ["Наташа"](https://github.com/natasha/navec).
Размер словаря: 500k слов.
Существует также другой интересный проект:
[DeepPavlov](https://docs.deeppavlov.ai/en/0.0.7/intro/pretrained_vectors.html),
содержащий около 1.5 млн. слов.
Его лицензия: **Apache 2.0** - позволяет как свободное, так и коммерческое использование.
С последним я и буду работать.
Нам потребуется скачать весь словарь, размером 4.14Гб, а затем загрузить его в память.
"""
class GloveModel():
"""
For a given text returns a list of embeddings
"""
Pat_Split_Text = re.compile(r"[\w']+|[.,!?;]", flags=re.RegexFlag.MULTILINE)
Unk_Tag: int = -1
Num_Tag: int = -1
def __init__(self, substitutions: Optional[str] = None, log: Optional[logging.Logger] = None):
if log is None:
log = logging.getLogger()
# Load Glove Model. Download and convert from text to .feather format (which is much faster)
glove_file_feather = 'ft_native_300_ru_wiki_lenta_lower_case.feather'
if not os.path.exists(glove_file_feather):
glove_file_vec = glove_file_feather.rsplit(os.extsep, 1)[0] + '.vec'
if not os.path.exists(glove_file_vec):
log.info('Downloading glove model for russia language from DeepPavlov...')
self.download_file(
'http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/'
'ft_native_300_ru_wiki_lenta_lower_case.vec'
)
log.info('Done')
# Load model from .vec file
log.info('Loading Glove Model from .vec format...')
self.glove = self.load_glove_model(glove_file_vec, size=300)
log.info(f'{len(self.glove)} words loaded!')
log.info('Saving Glove Model to .feather format...')
self.glove.reset_index().to_feather(glove_file_feather)
else:
log.info('Loading Glove Model from .feather format...')
self.glove = pd.read_feather(glove_file_feather)
log.info(f'{len(self.glove)} words loaded!')
log.info('Sorting glove dataframe by words...')
self.glove.sort_values('word', axis=0, ignore_index=True, inplace=True)
log.info('Done')
self.subs_tab = {}
if isinstance(substitutions, str):
for line in substitutions.splitlines():
words = line.strip().lower().split()
if len(words) < 2:
continue
self.subs_tab[words[0]] = words[1:]
log.info(f'Using the substitutions table of {len(self.subs_tab)} records')
"""
Для неизвестных слов я буду использовать embedding слова 'unk'.
А для чисел - embedding слова 'num'.
Я не уверен, что авторы DeepPavlov именно так и планировали.
Но стандартных '<unk>' или '<num>' я там не обнаружил.
"""
self.Unk_Tag = int(self.glove.word.searchsorted('unk'))
self.Num_Tag = int(self.glove.word.searchsorted('num'))
assert self.glove.word[self.Unk_Tag] == 'unk', 'Failed to find "unk" token in Glove'
assert self.glove.word[self.Num_Tag] == 'num', 'Failed to find "num" token in Glove'
def __len__(self):
return len(self.glove)
def __getitem__(self, text: str) -> List[np.ndarray]:
tags = self.text2tags(text, return_offsets=False)
embeddings = [self.tag2embedding(tag) for tag in tags]
return embeddings
@staticmethod
def download_file(url: str, block_size=4096, file_name: Optional[str] = None):
"""Downloads file and saves it to local file, displays progress bar"""
with requests.get(url, stream=True) as response:
if file_name is None:
if 'Content-Disposition' in response.headers.keys():
file_name = re.findall('filename=(.+)', response.headers['Content-Disposition'])[0]
if file_name is None:
file_name = url.split('/')[-1]
expected_size_in_bytes = int(response.headers.get('content-length', 0))
received_size_in_bytes = 0
with tqdm(total=expected_size_in_bytes, unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'wb') as file:
for data in response.iter_content(block_size):
file.write(data)
pbar.update(len(data))
received_size_in_bytes += len(data)
if (expected_size_in_bytes != 0) and (expected_size_in_bytes != received_size_in_bytes):
raise UserWarning(f'Incomplete download: {received_size_in_bytes} of {expected_size_in_bytes}')
@staticmethod
def load_glove_model(file_name: str, encoding: str = 'utf-8', size: Optional[int] = None) -> pd.DataFrame:
"""
Loads glove model from text file into pandas DataFrame
Returns
-------
df : pd.DataFrame
A dataframe with two columns: 'word' and 'embedding'.
The order of words is preserved as in the source file. Thus it may be unsorted!
"""
words, embeddings = [], []
with tqdm(total=os.path.getsize(file_name), unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'r', encoding=encoding) as f:
first_line = True
line = f.readline()
while line:
split_line = line.split()
line = f.readline()
if first_line:
first_line = False
if len(split_line) == 2:
if size is None:
size = int(split_line[1])
else:
assert size == int(split_line[1]), \
f'Size specified at the first line: {int(split_line[1])} does not match: {size}'
continue
if size is not None:
word = ' '.join(split_line[0:-size])
embedding = np.array(split_line[-size:], dtype=np.float32)
assert len(embedding) == size, f'{line}'
else:
word = split_line[0]
embedding = np.array(split_line[1:], dtype=np.float32)
size = len(embedding)
words.append(word)
embeddings.append(embedding)
pbar.update(f.tell() - pbar.n)
return pd.DataFrame({'word': words, 'embedding': embeddings})
def word2tag(self, word: str, use_unk=True, use_num=True) -> int:
tag = self.glove.word.searchsorted(word)
if tag == len(self.glove):
return self.Unk_Tag if use_unk else -1
if self.glove.word[tag] == word:
return int(tag)
if use_num:
try:
num = float(word)
return self.Num_Tag
except ValueError:
pass
return self.Unk_Tag if use_unk else -1
def tag2embedding(self, tag: int) -> np.ndarray:
return self.glove.embedding[tag]
def word2embedding(self, word: str) -> np.ndarray:
tag = self.word2tag(word)
return self.glove.embedding[tag]
@staticmethod
def separate_number_chars(s) -> List[str]:
"""
Does what its name says.
Examples
--------
'october10' -> ['october', '10']
'123asdad' -> ['123', 'asdad']
'-12.3kg' -> ['-12.3', 'kg']
'1aaa2' -> ['1', 'aaa', '2']
"""
res = re.split(r'([-+]?\d+\.\d+)|([-+]?\d+)', s.strip())
res_f = [r.strip() for r in res if r is not None and r.strip() != '']
return res_f
def text2tags(self, text: str, return_offsets=True) -> Union[List[int], Tuple[List[int], List[int]]]:
text = text.lower()
tags = []
offsets = []
for m in self.Pat_Split_Text.finditer(text):
# Get next word and its offset in text
word = m.group(0)
offset = m.start(0)
# Current word can be converted to a list of words due to substitutions: 'Iam' -> ['I', 'am']
# or numbers and letters separations: '123kg' -> ['123', 'kg']
if word in self.subs_tab:
words = self.subs_tab[word]
else:
words = self.separate_number_chars(word)
# Get a list of tags, generated on the source word.
# Note: they all point to the same offset in the original text.
for word in words:
tags.append(self.word2tag(word))
offsets.append(offset)
if not return_offsets:
return tags
return tags, offsets
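# Hedged usage sketch (defined only, never called): instantiating GloveModel downloads and
# loads a ~4 GB embeddings file on first run, so this is purely illustrative.
def _example_glove_usage():
    glove = GloveModel(substitutions=SUBSTITUTIONS, log=log)
    embeddings = glove['У коровы пропал аппетит']  # list of 300-d numpy vectors, one per token
    tags, offsets = glove.text2tags('У коровы пропал аппетит', return_offsets=True)
    return embeddings, tags, offsets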
"""
# Решение проблемы отсутствующих слов
По условиям конкурса:
> Запрещается Использовать ручную *разметку* *тестовых* данных в качестве решения, в т.ч. любые сервисы разметки.
При этом, не вполне ясно определено, что подразумевается под *разметкой* данных.
В любом случае, речь в запрете идёт о **тестовых** данных.
Поэтому, условия конкурса НЕ запрещают мне подготовить словарь для исправления некоторых ошибок,
а также для замены некоторых слов, которые отсутствуют в `embeddings`.
"""
SUBSTITUTIONS = """
цинксодержащие цинк содержащие
проглистогонила дала препарат от глистов
проглистогонил дал препарат от глистов
проглистовать дать препарат от глистов
проглистовали дали препарат от глистов
глистогонить дать препарат от глистов
противогельминтные против глистов
спазган обезболивающий препарат
спазгане обезболивающем препарата
спазганом обезболивающим препаратом
чемерицы рвотный препарат
чемерица рвотный препарат
чемерицей рвотным препаратом
седимин железосодерщащий препарат
левомеколь антисептической мазью
левомиколь антисептическая мазь
левомеколью антисептической мазью
левомиколью антисептической мазью
левомеколем антисептической мазью
левомиколем антисептической мазью
пребиотик пробиотик
пребеотик пробиотик
прибиотик пробиотик
прибеотик пробиотик
прибиотика пробиотик
пробиотика пробиотик
прибеотика пробиотик
пробеотика пробиотик
отел отёл
отелл отёл
оттел отёл
оттелл отёл
отелу отёлу
отеллу отёлу
оттелу отёлу
оттеллу отёлу
отёле родах
отёлл отёл
оттёл отёл
оттёлл отёл
отёллу отёлу
оттёлу отёлу
оттёллу отёлу
оттела отёла
отелла отёла
оттелла отёла
оттёла отёла
отёлла отёла
оттёлла отёла
отёлом отелом
оттелом отелом
отеллом отелом
оттеллом отелом
оттёлом отелом
отёллом отелом
оттёллом отелом
отелы отёлы
отеллы отёлы
оттелы отёлы
оттеллы отёлы
отелов отёлов
отеллов отёлов
оттелов отёлов
оттеллов отёлов
телилась рожала
отелилась родила
отёлилась родила
бурёнке корове
буренке корове
тёлке корове
тёлочке корове
тёлочка телочка
тёлочку корову
укоровы у коровы
телке корове
телки коровы
бычёк бычек
телятки телята
первотелка корова
первотелки коровы
новотельной коровы
коровушки коровы
доим дою
доишь дою
сдаиваю дою
выдаиваю дою
сдаиваем дою
выдаивем дою
додаиваю дою до конца
доились давали молоко
доется доится
выдаивании доении
сцеживал доил
сцеживала доила
доением отбором молока
сдаивание дойка
отпоил напоил
отпоила напоила
отпоили напоили
выпоить напоить
выпоили напоили
пропоить напоить
пропоили напоили
поите давайте пить
поили давали пить
свищик свищ
свищики свищи
гноящийся гнойный
выдрана вырвана
апитит аппетит
аппитит аппетит
апиттит аппетит
апетит аппетит
апеттит аппетит
опетит аппетит
оппетит аппетит
опеттит аппетит
оппеттит аппетит
опитит аппетит
зарастёт зарастет
пощаще почаще
паздбища пастбища
причинай причиной
пречинай причиной
килограм килограмм
килаграм килограмм
килаграмм килограмм
пузатенькая пузатая
абсцез абсцесс
абсцес абсцесс
абсцезс абсцесс
абсцэз абсцесс
абсцэс абсцесс
абсцэзс абсцесс
перестраховываюсь чересчур переживаю
непроходили не проходили
обкололи поставили укол
колили кололи
вколото поставлено
вкалол вколол
кольнул уколол
истыкали прокололи
накосячил ошибся
ветаптеке ветеринарной аптеке
ветаптеки ветеринарной аптеки
ветаптеку ветеринарную аптеку
житкостью жидкостью
рацеоне рационе
худющие худые
здох сдох
скаждым с каждым
четветый четвертый
ожёг ожег
поднятся подняться
захромала начала хромать
искривился стал кривым
расцарапывает царапает
вычесывает чешется
подшатываются шатаются
пошатываются шатаются
ветиринар ветеринар
ветеринат ветеринар
ветеренаров ветеринаров
ветиренаров ветеринаров
ветеренара ветеринара
ветиренара ветеринара
ветеренару ветеринару
ветиренару ветеринару
ветеренаром ветеринаром
ветиренаром ветеринаром
ветеренары ветеринары
ветиренары ветеринары
расслоилось разделилось на слои
разслоилось разделилось на слои
дегтеобразное похожее на деготь
дегтеобразная похожая на деготь
кремообразное похожее на крем
кремообразная похожая на крем
волосики волосы
залысина лысина
облазит линяет
уменя у меня
делоть делать
дилоть делать
дилать делать
зади сзади
взади сзади
взаде сзади
какба как-бы
какбы как-бы
прошупывается прощупывается
прашупывается прощупывается
пращупывается прощупывается
клещь клещ
клешь клещ
клеш клещ
клещь клещ
клещем клещ
клешем клещ
рвотная рвотный
тужится напрягается
тужиться напрягаться
какает испражняется
срет испражняется
срёт испражняется
дрищет испражняется
запоносил начал поносить
дристать поносить
подсохло высохло
нарывать опухать
оттекла отекла
отекшее опухшее
отёкшее опухшее
припух опух
припухло опухло
припухла опухла
опухшая набухшая
апухшая набухшая
вздувает раздувает
воспаленное поврежденное
вспухшие опухшие
расперло опухло
зашибла ушибла
припухлостей шишек
припухлостями шишками
припухлостям шишкам
припухлостях шишках
припушлостям шишкам
покраснений красноты
жидковат жидкий
жидковатый жидкий
жидковато жидко
жиденький жидкий
животина животное
животины животного
животине животному
животиной животным
животиною животным
температурит имеет повышенную температуру
темпиратурит имеет повышенную температуру
тимпературит имеет повышенную температуру
тимпиратурит имеет повышенную температуру
температурить иметь повышенную температуру
темпиратурить иметь повышенную температуру
тимпиратурить иметь повышенную температуру
тимпературить иметь повышенную температуру
покашливает кашляет
подкашливает кашляет
покашливают кашляют
подкашливают кашляют
откашливаются кашляют
покашливал кашлял
подкашливал кашлял
покашливали кашляли
подкашливали кашляли
откашливались кашляли
"""
class PositionalEncoding(nn.Module):
def __init__(self, d_model: int, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe: th.Tensor = th.zeros(max_len, d_model)
position = th.arange(0, max_len, dtype=th.float).unsqueeze(1)
div_term = th.exp(th.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = th.sin(position * div_term)
pe[:, 1::2] = th.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x: th.Tensor, mask: Optional[th.Tensor] = None) -> th.Tensor:
"""
Parameters
----------
x: torch.Tensor (sequence_length, batch_size, d_model)
mask: torch.Tensor (batch_size, sequence_length)
Returns
-------
output: (sequence_length, batch_size, d_model)
"""
pos = self.pe[:x.size(0), :]
x = x + th.permute(mask, (1, 0)).unsqueeze(2).expand_as(x) * pos
return self.dropout(x)
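# Shape-check sketch (arbitrary sizes, illustration only): position information is added
# only where the padding mask is 1.
def _example_positional_encoding():
    seq_len, batch, d_model = 10, 3, 16
    pe = PositionalEncoding(d_model, max_len=50)
    x = th.zeros(seq_len, batch, d_model)
    mask = th.ones(batch, seq_len)
    return pe(x, mask=mask).shape  # torch.Size([10, 3, 16])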
class DoctorText(nn.Module):
"""
PyTorch Model based on sentiment predictions
It uses only Encoder part of the Transformers architecture
"""
def __init__(self,
glove: GloveModel,
d_model: int,
initial_targets: Sequence[str],
num_heads=8,
num_layers=6,
d_hidden=1024,
max_len=5000,
dropout=0.1,
causal_mask=True,
device: Optional[Union[str, th.device]] = None):
super().__init__()
self.glove = glove
self.d_model = d_model
self.num_heads = num_heads
self.num_layers = num_layers
self.num_targets = len(initial_targets)
self.d_hidden = d_hidden
self.max_len = max_len
self.dropout = dropout
self.causal_mask = causal_mask
self.device = device
self.position_encoder = PositionalEncoding(d_model, max_len=max_len)
encoder_layer = nn.TransformerEncoderLayer(d_model, nhead=num_heads, dim_feedforward=d_hidden, dropout=dropout)
self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
"""
Our `targets` also have embeddings. One embedding - if target is described by a word,
or multiple - if target is described by a phrase.
We can initialize target embeddings randomly to be trained during training cycle.
But, maybe, we can speed up the learning process if we initialize target embeddings
with their `original meaning`.
We can easily do that by analyzing target description and summing up the
respective embeddings of the words of that description.
Of course, anyway, targets will be changed during the training cycle.
"""
initial_target_embeddings = []
for target_phrase in initial_targets:
target_embeddings = self.glove[target_phrase]
target_embedding = np.mean(np.stack(target_embeddings), axis=0)
initial_target_embeddings.append(target_embedding)
initial_target_embeddings = th.tensor(initial_target_embeddings, dtype=th.float32).unsqueeze(dim=1)
self.targets = nn.Parameter(initial_target_embeddings)
self.collect = nn.MultiheadAttention(embed_dim=d_model, num_heads=num_heads, bias=False, add_bias_kv=False)
self.output = nn.Linear(self.num_targets * d_model, self.num_targets)
def forward(self, texts: List[str], output_spans=False, threshold=0.7):
# Convert batch of texts into tensor of embeddings
x, padding_mask, batch_offsets = self.texts2batch(texts)
# x has shape: (sequence_length, batch_size, d_model)
# padding_mask has shape: (batch_size, sequence_length)
# batch_offsets is the list of length of batch_size, which contains a list of offsets for each tag
# Add positional information into x
x = self.position_encoder.forward(x, mask=padding_mask)
# Initialize self-attention mask, so that words could attend only prior words.
attn_mask = None
if self.causal_mask:
attn_mask = th.full((len(x), len(x)), -math.inf, device=x.device, dtype=x.dtype)
attn_mask = th.triu(attn_mask, diagonal=1)
x = self.transformer_encoder.forward(x, mask=attn_mask, src_key_padding_mask=padding_mask)
# x still has shape (sequence_length, batch_size, d_model)
# Combine source embeddings into one embedding, one for each target
attn_output, attn_weights = self.collect.forward(
query=self.targets.expand((self.num_targets, x.size(1), self.d_model)),
key=x,
value=x,
key_padding_mask=padding_mask,
need_weights=True
)
# attn_output has the shape: (num_targets, batch_size, d_model)
# attn_weights has the shape: (batch_size, num_targets, sequence_length)
attn_output = attn_output.permute((1, 0, 2)).reshape(x.size(1), -1)
# attn_output now has the shape: (batch_size, num_targets * d_model)
output = th.sigmoid(self.output.forward(attn_output))
# output has the shape: (batch_size, num_targets)
if not output_spans:
return output
# Get text symbol spans from the weights of words
batch_weights: np.ndarray = attn_weights.detach().numpy()
batch_weights = np.abs(batch_weights).max(axis=1)
batch_weights = batch_weights - batch_weights.min()
batch_weights = batch_weights / batch_weights.max()
# batch_weights has now shape: (batch_size, sequence_length)
spans = self.weights2spans(texts, batch_offsets, batch_weights, threshold)
return output, spans
def texts2batch(self, texts: List[str]) -> Tuple[th.Tensor, th.Tensor, List[List[int]]]:
# Convert texts to batch of embeddings and padding masks
batch_sequences = []
batch_offsets = []
batch_lengths = []
max_len = 0
for text in texts:
tags, offsets = self.glove.text2tags(text, return_offsets=True)
sequence = [self.glove.tag2embedding(tag) for tag in tags]
sequence = th.tensor(sequence, dtype=th.float32, device=self.device)
batch_sequences.append(sequence)
batch_offsets.append(offsets)
batch_lengths.append(len(tags))
if max_len < len(tags):
max_len = len(tags)
x = pad_sequence(batch_sequences)
# Create padding mask to zero out padded values for each sequence
padding_mask = []
for length in batch_lengths:
mask = th.ones(max_len, dtype=th.float32, device=self.device)
mask[length:] = 0.0
padding_mask.append(mask)
padding_mask = th.stack(padding_mask)
return x, padding_mask, batch_offsets
@staticmethod
def weights2spans(texts: List[str],
batch_offsets: List[List[int]],
batch_weights: np.ndarray,
threshold=0.75
) -> List[List[List[int]]]:
# Get input words weight
batch_spans = []
for text, offsets, weights in zip(texts, batch_offsets, batch_weights):
spans = []
st, en = None, None
for i, w in enumerate(weights):
if i >= len(offsets):
break
if (en is not None) and (en == offsets[i]):
continue
if w < threshold:
if st is not None:
spans.append([st, offsets[i] - 1])
st, en = None, None
continue
if st is None:
st = offsets[i]
en = offsets[i]
if st is not None:
spans.append([st, len(text) - 1])
batch_spans.append(spans)
return batch_spans
@th.no_grad()
def predict(self, text: str, output_spans=True, threshold=0.75) -> Tuple[np.ndarray, List[List[int]]]:
if output_spans:
output, spans = self.forward([text], output_spans=output_spans, threshold=threshold)
return output.detach().squeeze(0).numpy(), spans[0]
output = self.forward([text], output_spans=False)
return output.detach().squeeze(0).numpy()
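# Hedged usage sketch (defined only, never called): it requires the loaded GloveModel, and the
# ten target names below are placeholders, not the real competition labels.
def _example_doctor_text():
    glove = GloveModel(substitutions=SUBSTITUTIONS, log=log)
    model = DoctorText(glove, d_model=300,
                       initial_targets=[f'болезнь {i}' for i in range(10)],
                       num_heads=6, num_layers=2)
    probs, spans = model.predict('Корова кашляет и потеряла аппетит', output_spans=True)
    return probs, spans  # probs: (10,) array of probabilities, spans: character ranges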
class CosineWarmupScheduler(optim.lr_scheduler._LRScheduler):
"""
This scheduler starts from zero, reaches maximum after `warmup` iterations,
then slowly decays for `max_iters` iterations.
It helps to 'ignite' the Transformer learning process.
"""
def __init__(self, optimizer: optim.Optimizer, warmup: int, max_iters: int):
self.warmup = warmup
self.max_num_iters = max_iters
super().__init__(optimizer)
def get_lr(self):
lr_factor = self.get_lr_factor(epoch=self.last_epoch)
return [base_lr * lr_factor for base_lr in self.base_lrs]
def get_lr_factor(self, epoch):
lr_factor = 0.5 * (1 + np.cos(np.pi * epoch / self.max_num_iters))
if epoch <= self.warmup:
lr_factor *= epoch * 1.0 / self.warmup
return lr_factor
def __repr__(self):
return f'{type(self).__name__}(warmup={self.warmup}, max_iters={self.max_num_iters})'
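# Small illustration (arbitrary numbers): the learning-rate factor ramps up linearly for
# `warmup` steps and then follows a cosine decay down to zero at `max_iters`.
def _example_warmup_schedule():
    dummy = nn.Linear(1, 1)
    opt = optim.SGD(dummy.parameters(), lr=0.1)
    sched = CosineWarmupScheduler(optimizer=opt, warmup=5, max_iters=50)
    return [sched.get_lr_factor(epoch) for epoch in (0, 5, 25, 50)]  # roughly [0.0, 0.98, 0.5, 0.0]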
def perform_repair_parameters(param_groups):
"""Check and replace zero, NaN or inf parameters with random values"""
for group in param_groups:
for param in group['params']:
if isinstance(param, th.Tensor):
index = ((param.data != param.data) + (param.data == 0) +
(param.data == np.inf) + (param.data == -np.inf))
n = index.sum()
if n > 0:
param.data[index] = np.random.randn() / param.nelement()
index = ((param.data < -1e+10) + (param.data > 1e+10))
n = index.sum()
if n > 0:
param.data.clamp_(min=-1e+10, max=1e+10)
class TorchWrapper(data_utils.Dataset):
"""Convert AgroCode train DataFrame into PyTorch compatible dataset"""
def __init__(self, ds: pd.DataFrame, device: Optional[Union[str, th.device]] = None):
self.device = device
self.X = ds[ds.columns[1]]
self.Y = ds[ds.columns[2:]]
def __len__(self):
return len(self.X)
def __getitem__(self, index):
x = self.X.iloc[index]
y = self.Y.iloc[index]
return x, th.tensor(y.to_numpy(), device=self.device, dtype=th.float32)
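# Illustration only: TorchWrapper assumes the column layout [id, text, target_1 ... target_n];
# the tiny frame below is fabricated.
def _example_torch_wrapper():
    frame = pd.DataFrame({
        'id': [0, 1],
        'text': ['корова кашляет', 'телёнок хромает'],
        'label_a': [1, 0],
        'label_b': [0, 1],
    })
    ds = TorchWrapper(frame)
    x, y = ds[0]  # x: raw text string, y: float32 tensor of labels
    return x, y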
def log_loss_score(prediction, ground_truth):
log_loss_ = 0
ground_truth = np.array(ground_truth)
for i in range(10):
log_loss_ += log_loss(ground_truth[:, i], prediction[:, i])
return log_loss_ / 10
def evaluate(model: nn.Module, dataset, batch_size=20) -> float:
model.eval()
index = np.arange(len(dataset))
prediction = []
ground_truth = []
i = 0
while i < len(dataset):
# Get test batch
e = min(i + batch_size, len(dataset))
x, y = dataset[index[i:e]]
y_hat = model.forward(x)
prediction.append(y_hat.detach().numpy())
ground_truth.append(y.numpy())
i += batch_size
prediction = np.concatenate(prediction, axis=0)
ground_truth = np.concatenate(ground_truth, axis=0)
return 1 - log_loss_score(prediction=prediction, ground_truth=ground_truth)
def train(model: nn.Module,
criterion: nn.Module,
optimizer: optim.Optimizer,
model_file_name: str,
ds_train,
ds_valid,
batch_size=20,
max_epochs=50,
repair_parameters=True,
early_stopping=True,
tolerance=1e-5,
patience=20,
rng: Optional[np.random.Generator] = None,
log: Optional[logging.Logger] = None
):
if rng is None:
rng = np.random.default_rng()
if log is None:
log = logging.getLogger()
# Get number of train and test samples, batch_size
n_train_samples, n_test_samples = len(ds_train), len(ds_valid)
train_indexes, test_indexes = np.arange(n_train_samples), np.arange(n_test_samples)
batch_size = int(np.clip(batch_size, 1, min(n_train_samples, n_test_samples)))
n_train_batches = int(np.ceil(n_train_samples / batch_size))
n_test_batches = int(np.ceil(n_test_samples / batch_size))
# To keep best parameters
best_test_loss: Optional[float] = None
best_parameters: Optional[Dict] = None
no_improvement_count = 0
    n_epoch = 0
n_iter = 0
scheduler = CosineWarmupScheduler(optimizer=optimizer, warmup=25, max_iters=max_epochs * n_train_batches)
log.info(f'Initialized scheduler:\n{repr(scheduler)}')
try:
# Iterate over epochs
while n_epoch < max_epochs:
# Shuffle train indexes if needed
train_indexes = rng.permutation(train_indexes)
# Reset train and test epoch indexes and registers
train_index, test_index = 0, 0
train_batch, test_batch = 0, 0
accumulated_train_loss, accumulated_test_loss = 0, 0
# Force clear unused memory
gc.collect()
# Iterate over batches in train and test datasets
            with tqdm(total=(n_train_batches + n_test_batches), ncols=80) as pbar:
while (train_index < n_train_samples) or (test_index < n_test_samples):
# Choose training or testing on this iteration
if (test_index / n_test_samples) < (train_index / n_train_samples):
# Perform testing:
model.eval()
criterion.eval()
# Get test batch
x, y = ds_valid[test_indexes[test_index:test_index + batch_size]]
# Predict
y_hat = model.forward(x)
# Calculate overall test loss
loss = criterion(y_hat, y)
loss_scalar = loss.detach().item()
accumulated_test_loss += loss_scalar
# Increment test iteration counter
test_index = test_index + len(x)
test_batch = int(np.ceil(min(n_test_samples, (test_index - 1)) / batch_size))
else:
# Perform training:
model.train()
criterion.train()
# Get next batch inputs x and targets y
x, y = ds_train[train_indexes[train_index:train_index + batch_size]]
# Pass x through model and get predictions y_hat
y_hat = model.forward(x)
# Calculate overall train loss
loss = criterion(y_hat, y)
loss_scalar = loss.detach().item()
accumulated_train_loss += loss_scalar
# Update network weights
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Check and fix broken parameters if any
if repair_parameters:
perform_repair_parameters(optimizer.param_groups)
# Update learning_rate if needed
if scheduler is not None:
scheduler.step()
# Increment train iteration counter
train_index = train_index + len(x)
train_batch = int(np.ceil(min(n_train_samples, (train_index - 1)) / batch_size))
n_iter += 1
pbar.update(train_batch + test_batch - pbar.n)
# Compute mean train and test loss for epoch
train_loss = accumulated_train_loss * batch_size / n_train_samples
test_loss = accumulated_test_loss * batch_size / n_test_samples
# Compute performance
performance = evaluate(model, ds_valid, batch_size)
# Increment epoch counter
n_epoch += 1
# Print epoch results
log.info(
f'Epoch: {n_epoch}/{max_epochs}, '
f'iter: {n_iter}, '
f'lr: {scheduler.get_lr()[0]:g}, '
f'train: {train_loss:g}, '
f'test: {test_loss:g}, '
f'perf: {performance:g}'
)
# Check for new best result
if (best_test_loss is None) or (best_test_loss > test_loss + tolerance):
# Save current best parameters
best_parameters = copy.deepcopy(model.state_dict())
log.info(f'Saving model to {model_file_name}')
th.save(best_parameters, model_file_name)
no_improvement_count = 0
best_test_loss = test_loss
else:
no_improvement_count += 1
# Check for early stopping
if early_stopping and (no_improvement_count > patience):
log.info(
f'Test score did not improve more than tolerance={tolerance} '
f'for {patience} consecutive epochs. Stopping.'
)
break
log.info('Finished training')
except StopIteration:
log.info('Training was stopped.')
except KeyboardInterrupt:
log.info('Training was interrupted by user.')
except InterruptedError:
log.info('Training was interrupted by system.')
def main():
glove = GloveModel(substitutions=SUBSTITUTIONS, log=log)
log.info('Loading train and test datasets...')
train = pd.read_csv('train.csv')
    test = pd.read_csv('test.csv')
from copy import deepcopy
import elasticsearch
import pandas as pd
from suricate.base import ConnectorMixin
from suricate.dftransformers.cartesian import cartesian_join
import numpy as np
import time
ixname = 'ix'
ixnamesource = 'ix_source'
ixnametarget = 'ix_target'
ixname_pairs = [ixnamesource, ixnametarget]
class EsConnector(ConnectorMixin):
"""
Elastic Search Connector for the suricate project
Attributes:
client (elasticsearch.Elasticsearch):
ixname (str): this is the name of the index column in output dataframes. The unique identifier is taken from the id in elastic search
source_suffix:
"""
def __init__(self, client, index, scoreplan, doc_type='_doc', size=30, explain=True,
ixname='ix', source_suffix='source', target_suffix='target',
es_id='es_id', es_score='es_score', suffix_score='es', es_rank='es_rank'):
"""
Args:
client (elasticsearch.Elasticsearch): elastic search client
index (str): name of the ES index to search (from GET _cat/indices)
doc_type (str): the name of the document type in the ES database
scoreplan (dict): list of matches to have (see above)
size (int): max number of hits from ES
explain (bool): get detailed scores
ixname (str): default 'ix', index name (in the sense of unique identified of record)
            source_suffix (str): 'source'
            target_suffix (str): 'target'
es_score (str): 'es_score'
es_id (str): 'es_id'
suffix_score (str): 'es'
es_rank (str):'es_rank'
"""
ConnectorMixin.__init__(self, ixname=ixname, source_suffix=source_suffix, target_suffix=target_suffix)
self.client = client
assert isinstance(self.client, elasticsearch.client.Elasticsearch)
self.index = index
self.doc_type = doc_type
self.scoreplan = scoreplan
self.size = size
self.explain = explain
self.es_score = es_score
self.es_rank = es_rank
self.es_id = es_id
self.suffix_score = suffix_score
self.usecols = list(self.scoreplan.keys())
self.outcols = [self.es_score, self.es_rank]
if self.explain is True:
self.outcols += [c + '_' + self.suffix_score for c in self.usecols]
def fetch_source(self, X, ix):
"""
Args:
X: input data (left)
ix (pd.Index): index of the records to be passed on
Returns:
pd.DataFrame formatted records
"""
return X.loc[ix]
def fetch_target(self, ix=None, X=None):
"""
Args:
X: dummy, input data to be given to the connector
ix (pd.Index): index of the records to be passed on
Returns:
pd.DataFrame: formatted records ['ix': '....']
"""
results={}
if isinstance(ix, np.ndarray):
ixl = ix
elif isinstance(ix, pd.Index):
ixl = ix.values
for i in ixl:
assert isinstance(self.client, elasticsearch.client.Elasticsearch)
res = self.client.get(index=self.index, id=i, doc_type=self.doc_type)
if res['found'] is False:
raise IndexError(
'id: {} not found in ES Index {} for doc_type {}'.format(i, self.index, self.doc_type)
)
else:
data = res['_source']
results[i] = data
X = pd.DataFrame.from_dict(data=results, orient='index')
X.index.name = self.ixname
# If we have a duplicate column ix
if self.ixname in X.columns:
X.drop(labels=[self.ixname], axis=1, inplace=True)
return X
def getsbs(self, X, on_ix=None):
"""
Args:
X (pd.DataFrame): input data (source)
on_ix (pd.MultiIndex):
Returns:
pd.DataFrame
"""
ix_source = np.unique(on_ix.get_level_values(self.ixnamesource))
ix_target = np.unique(on_ix.get_level_values(self.ixnametarget))
source = self.fetch_source(X=X, ix=ix_source)
target = self.fetch_target(ix=ix_target)
df = cartesian_join(source=source, target=target, on_ix=on_ix, ixname=self.ixname, source_suffix=self.source_suffix, target_suffix=self.target_suffix)
return df
def fit(self, X=None, y=None):
"""
Dummy transformer
Args:
X:
y:
Returns:
EsConnector
"""
return self
def transform(self, X):
"""
Args:
X (pd.DataFrame): source data
Returns:
pd.DataFrame: X_score ({['ix_source', 'ix_target'}: 'es_score', 'es_rank'])
"""
alldata = pd.DataFrame(columns=[self.ixnamesource, self.ixnametarget, self.es_score, self.es_rank])
for lix in X.index:
record = X.loc[lix]
res = self.search_record(record)
score = unpack_allhits(res)
            df = pd.DataFrame.from_dict(score, orient='columns')
## 1. Recap ##
import pandas as pd
loans = pd.read_csv("cleaned_loans_2007.csv")
print(loans.info())
## 3. Picking an error metric ##
import pandas as pd
# False positives.
fp_filter = (predictions == 1) & (loans["loan_status"] == 0)
fp = len(predictions[fp_filter])
# True positives.
tp_filter = (predictions == 1) & (loans["loan_status"] == 1)
tp = len(predictions[tp_filter])
# False negatives.
fn_filter = (predictions == 0) & (loans["loan_status"] == 1)
fn = len(predictions[fn_filter])
# True negatives
tn_filter = (predictions == 0) & (loans["loan_status"] == 0)
tn = len(predictions[tn_filter])
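# Side note (not part of the original exercise): sklearn's confusion_matrix returns the same
# four counts in one call for binary 0/1 labels.
from sklearn.metrics import confusion_matrix
tn_check, fp_check, fn_check, tp_check = confusion_matrix(loans["loan_status"], predictions).ravel()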
## 5. Class imbalance ##
import pandas as pd
import numpy
# Predict that all loans will be paid off on time.
predictions = pd.Series(numpy.ones(loans.shape[0]))
# False positives.
fp_filter = (predictions == 1) & (loans["loan_status"] == 0)
fp = len(predictions[fp_filter])
# True positives.
tp_filter = (predictions == 1) & (loans["loan_status"] == 1)
tp = len(predictions[tp_filter])
# False negatives.
fn_filter = (predictions == 0) & (loans["loan_status"] == 1)
fn = len(predictions[fn_filter])
# True negatives
tn_filter = (predictions == 0) & (loans["loan_status"] == 0)
tn = len(predictions[tn_filter])
# Rates
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
print(tpr)
print(fpr)
## 6. Logistic Regression ##
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
cols = loans.columns
train_cols = cols.drop("loan_status")
features = loans[train_cols]
target = loans["loan_status"]
lr.fit(features, target)
predictions = lr.predict(features)
## 7. Cross Validation ##
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
lr = LogisticRegression()
predictions = cross_val_predict(lr, features, target, cv=3)
predictions = pd.Series(predictions)
# False positives.
fp_filter = (predictions == 1) & (loans["loan_status"] == 0)
fp = len(predictions[fp_filter])
# True positives.
tp_filter = (predictions == 1) & (loans["loan_status"] == 1)
tp = len(predictions[tp_filter])
# False negatives.
fn_filter = (predictions == 0) & (loans["loan_status"] == 1)
fn = len(predictions[fn_filter])
# True negatives
tn_filter = (predictions == 0) & (loans["loan_status"] == 0)
tn = len(predictions[tn_filter])
# Rates
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
print(tpr)
print(fpr)
## 9. Penalizing the classifier ##
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
lr = LogisticRegression(class_weight="balanced")
predictions = cross_val_predict(lr, features, target, cv=3)
predictions = pd.Series(predictions)
# False positives.
fp_filter = (predictions == 1) & (loans["loan_status"] == 0)
fp = len(predictions[fp_filter])
# True positives.
tp_filter = (predictions == 1) & (loans["loan_status"] == 1)
tp = len(predictions[tp_filter])
# False negatives.
fn_filter = (predictions == 0) & (loans["loan_status"] == 1)
fn = len(predictions[fn_filter])
# True negatives
tn_filter = (predictions == 0) & (loans["loan_status"] == 0)
tn = len(predictions[tn_filter])
# Rates
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
print(tpr)
print(fpr)
## 10. Manual penalties ##
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
penalty = {
0: 10,
1: 1
}
lr = LogisticRegression(class_weight=penalty)
predictions = cross_val_predict(lr, features, target, cv=3)
predictions = pd.Series(predictions)
#!/usr/bin/env python3
"""
dataframe_utils.py
Utilities for pd.DataFrame manipulation for ipy notebooks.
"""
import errno
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def ensure_exists(path):
"""Ensure the path exists; if not, make the directory."""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def read_spectrum(file_path):
"""Returns an ISO spectrum (wave, flux, etc.) from a pickle."""
spectrum = pd.read_pickle(file_path)
wave = spectrum['wavelength']
flux = spectrum['flux']
    fluxerr = None
    try:
fluxerr = spectrum['uncertainty']
except Exception:
pass
try:
fluxerr = spectrum['error (RMS+SYS)']
except Exception:
pass
spectrum_dict = {
'wave': wave,
'flux': flux,
'fluxerr': fluxerr,
}
return spectrum_dict
def read_metadata(metadata_df, index):
"""Returns a dictionary of useful metadata."""
file_path = metadata_df['file_path'].values[index]
base_name_pkl = file_path.split('/')[-1]
metadata_dict = {
'base_name_pkl': base_name_pkl,
'classifier': metadata_df['full_classifier'].values[index],
'obj_type': metadata_df['object_type'].values[index],
'obj_name': metadata_df['object_name'].values[index]
}
return metadata_dict
def plot_spectrum(spectrum_dict, metadata_dict, pdfpages=None,
save_dir='step5_cull_suspect/plots/',
verbose=False, **kwargs):
"""Save a PDF of a spectrum, needs a spectrum dict and metadata dict."""
# Needed source properties.
base_name_pkl = metadata_dict['base_name_pkl']
classifier = metadata_dict['classifier']
obj_type = metadata_dict['obj_type']
obj_name = metadata_dict['obj_name']
# Plot relevant quantities/labels.
fig, ax = plt.subplots()
ax.plot(spectrum_dict['wave'], spectrum_dict['flux'], **kwargs)
ax.set_title(classifier + ' - ' + base_name_pkl)
ax.text(0.05, 0.95, obj_type, transform=ax.transAxes,
color='red', ha='left')
ax.text(0.95, 0.95, obj_name, transform=ax.transAxes,
color='red', ha='right')
# Save to PDF.
ensure_exists(save_dir)
savename = base_name_pkl.replace('.pkl', '.pdf')
if pdfpages is not None:
pdfpages.savefig(fig, bbox_inches='tight')
plt.close()
else:
fig.savefig(save_dir + savename, format='pdf', bbox_inches='tight')
plt.close()
fig.clear()
if verbose:
print('Saved: ', save_dir + savename)
return
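# Hedged usage sketch with synthetic data; real inputs come from read_spectrum/read_metadata.
def _example_plot_spectrum():
    spectrum = {'wave': np.linspace(2.4, 45.0, 100),
                'flux': np.random.rand(100),
                'fluxerr': np.full(100, 0.05)}
    metadata = {'base_name_pkl': 'example.pkl', 'classifier': 'demo',
                'obj_type': 'unknown', 'obj_name': 'synthetic source'}
    plot_spectrum(spectrum, metadata, save_dir='plots/examples/')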
def plot_dataframe(dataframe, save_dir, **kwargs):
"""Saves to PDF all spectra in a DataFrame."""
# Iterate over the rows of the given input dataframe..
for index, data_ok in enumerate(dataframe['data_ok']):
if not data_ok:
continue
# Read in spectrum and metadata for given row.
file_path = dataframe['file_path'].values[index]
spectrum_dict = read_spectrum('../../' + file_path)
metadata_dict = read_metadata(dataframe, index)
# Save spectrum as a PDF.
plot_spectrum(spectrum_dict, metadata_dict,
save_dir=save_dir, **kwargs)
return
def convert_fits_to_pickle(path, verify_pickle=False, verbose=False):
"""Full conversion from ISO-SWS <filename.fits to <filename>.pkl,
which contains a pd.DataFrame.
Args:
path (str): Path to <filename>.fits file (of an ISO-SWS observation).
verify_pickle (bool): Confirm the pickle was succesful created; does so
by comparing the pd.DataFrame before and after writing the pickle.
Returns:
True if successful.
Note:
DataFrame can be retrieved from the pickle by, e.g.,
df = pd.read_pickle(pickle_path).
"""
if verbose:
print('Pickling: ', path)
# Convert .fits file to pandas DataFrame, header.Header object.
try:
df, header = isosws_fits_to_dataframe(path)
except Exception as e:
raise(e)
# Determine the pickle_path to save to. Being explicit here to
# 'pickle_path' is clear.
base_filename = path.replace('.fit', '.pkl').split('/')[-1]
# Save the dataframe to a pickle.
pickle_path = 'spectra/' + base_filename
df.to_pickle(pickle_path)
if verbose:
print('...saved: ', pickle_path)
# Test dataframes for equality before/after pickling if
# verify_pickle == True.
if verify_pickle:
        tmp_df = pd.read_pickle(pickle_path)