repo_name (stringlengths 6–77) | path (stringlengths 8–215) | license (stringclasses 15 values) | content (stringlengths 335–154k)
---|---|---|---|
biosustain/cameo-notebooks
|
Advanced-SynBio-for-Cell-Factories-Course/Flux Balance Analysis.ipynb
|
apache-2.0
|
import pandas
pandas.options.display.max_rows = 12
import escher
from cameo import models, fba
from cameo.exceptions import Infeasible
"""
Explanation: Flux Balance Analysis
Load a few packages and functions.
End of explanation
"""
model = models.bigg.e_coli_core.copy()
print(model.objective)
"""
Explanation: Predict maximum growth rate and metabolic fluxes of wildtype E. coli.
First we load a model from the BiGG database (and make a copy of it).
End of explanation
"""
result = fba(model)
"""
Explanation: Run flux balance analysis.
End of explanation
"""
result.objective_value
"""
Explanation: The predicted growth rate is $0.87 \ h^{-1}$.
End of explanation
"""
result.data_frame
result.data_frame.describe()
active_fluxes = result.data_frame[result.data_frame.flux != 0].sort_values(by='flux')
active_fluxes
model.reactions.ATPS4r
import escher
import jupyter
escher.__version__
!pip show jupyter
escher.Builder('e_coli_core.Core metabolism', reaction_data=active_fluxes.flux.to_dict()).display_in_notebook()
result.display_on_map('e_coli_core.Core metabolism')
"""
Explanation: Take a look at the predicted metabolic fluxes.
End of explanation
"""
gene_essentiality = {}
for gene in model.genes:
    mutant = model.copy()
    mutant.genes.get_by_id(gene.id).knock_out()
    try:
        # run FBA once per knockout and reuse the result
        growth = fba(mutant).objective_value
    except Infeasible:
        # an infeasible problem means the knockout abolishes growth
        growth = 0
    print(gene, growth)
    gene_essentiality[gene] = growth
"""
Explanation: Exercise
Try a few different objectives.
Assess the effects of gene deletions.
Hint: a gene can be knocked out by running model.genes.get_by_id('<geneID>').knock_out() where <geneID> needs to be replaced with an actual gene identifier.
Solutions
End of explanation
"""
|
tommyogden/maxwellbloch
|
docs/examples/mbs-two-sech-2pi.ipynb
|
mit
|
import numpy as np
SECH_FWHM_CONV = 1./2.6339157938
t_width = 1.0*SECH_FWHM_CONV # [τ]
print('t_width', t_width)
mb_solve_json = """
{
"atom": {
"fields": [
{
"coupled_levels": [[0, 1]],
"rabi_freq_t_args": {
"n_pi": 2.0,
"centre": 0.0,
"width": %f
},
"rabi_freq_t_func": "sech"
}
],
"num_states": 2
},
"t_min": -2.0,
"t_max": 10.0,
"t_steps": 120,
"z_min": -0.5,
"z_max": 1.5,
"z_steps": 100,
"interaction_strengths": [
10.0
],
"savefile": "mbs-two-sech-2pi"
}
"""%(t_width)
from maxwellbloch import mb_solve
mb_solve_00 = mb_solve.MBSolve().from_json_str(mb_solve_json)
"""
Explanation: Two-Level: Sech Pulse 2π — Self-Induced Transparency
Define the Problem
First we need to define a sech pulse with the area we want. We'll fix the width of the pulse and the area to find the right amplitude.
The full-width at half maximum (FWHM) $t_s$ of the sech pulse is related to the FWHM of a Gaussian by a factor of $1/2.6339157938$. (See §3.2.2 of my PhD thesis).
End of explanation
"""
print('The input pulse area is {0}'.format(np.trapz(mb_solve_00.Omegas_zt[0,0,:].real,
mb_solve_00.tlist)/np.pi))
"""
Explanation: We'll just check that the pulse area is what we want.
End of explanation
"""
%time Omegas_zt, states_zt = mb_solve_00.mbsolve(recalc=True)
"""
Explanation: Solve the Problem
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import numpy as np
sns.set_style("darkgrid")
fig = plt.figure(1, figsize=(16, 6))
ax = fig.add_subplot(111)
cmap_range = np.linspace(0.0, 1.0, 11)
cf = ax.contourf(mb_solve_00.tlist, mb_solve_00.zlist,
np.abs(mb_solve_00.Omegas_zt[0]/(2*np.pi)),
cmap_range, cmap=plt.cm.Blues)
ax.set_title(r'Rabi Frequency ($\Gamma / 2\pi$)')
ax.set_xlabel(r'Time ($1/\Gamma$)')
ax.set_ylabel('Distance ($L$)')
for y in [0.0, 1.0]:
    ax.axhline(y, c='grey', lw=1.0, ls='dotted')
plt.colorbar(cf);
fig, ax = plt.subplots(figsize=(16, 4))
ax.plot(mb_solve_00.zlist, mb_solve_00.fields_area()[0]/np.pi, clip_on=False)
ax.set_ylim([0.0, 8.0])
ax.set_xlabel('Distance ($L$)')
ax.set_ylabel(r'Pulse Area ($\pi$)');
"""
Explanation: Plot Output
End of explanation
"""
# C = 0.1 # speed of light
# Y_MIN = 0.0 # Y-axis min
# Y_MAX = 4.0 # y-axis max
# ZOOM = 2 # level of linear interpolation
# FPS = 30 # frames per second
# ATOMS_ALPHA = 0.2 # Atom indicator transparency
# FNAME = "images/mb-solve-two-sech-2pi"
# FNAME_JSON = FNAME + '.json'
# with open(FNAME_JSON, "w") as f:
# f.write(mb_solve_json)
# !make-mp4-fixed-frame.py -f $FNAME_JSON -c $C --fps $FPS --y-min $Y_MIN --y-max $Y_MAX \
# --zoom $ZOOM --atoms-alpha $ATOMS_ALPHA #--peak-line --c-line
# FNAME_MP4 = FNAME + '.mp4'
# !make-gif-ffmpeg.sh -f $FNAME_MP4 --in-fps $FPS
# from IPython.display import Image
# Image(url=FNAME_MP4 +'.gif', format='gif')
"""
Explanation: Analysis
The $2 \pi$ sech pulse passes through, slowed but with shape unaltered. This is self-induced transparency.
Movie
End of explanation
"""
|
ThunderShiviah/code_guild
|
interactive-coding-challenges/arrays_strings/reverse_string/reverse_string_challenge-Copy1.ipynb
|
mit
|
def list_of_chars(list_chars):
    if list_chars is None:
        return None
    # reverse in place with index swaps (slicing would allocate a new list)
    for i in range(len(list_chars) // 2):
        list_chars[i], list_chars[-1 - i] = list_chars[-1 - i], list_chars[i]
    return list_chars
"""
Explanation: <small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small>
Challenge Notebook
Problem: Implement a function to reverse a string (a list of characters), in-place.
Constraints
Test Cases
Algorithm
Code
Unit Test
Solution Notebook
Constraints
Can I assume the string is ASCII?
Yes
Note: Unicode strings could require special handling depending on your language
Since we need to do this in-place, it seems we cannot use the slice operator or the reversed function?
Correct
Since Python strings are immutable, can I use a list of characters instead?
Yes
Test Cases
None -> None
[''] -> ['']
['f', 'o', 'o', ' ', 'b', 'a', 'r'] -> ['r', 'a', 'b', ' ', 'o', 'o', 'f']
Algorithm
Refer to the Solution Notebook. If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
Code
End of explanation
"""
# %load test_reverse_string.py
from nose.tools import assert_equal
class TestReverse(object):
def test_reverse(self):
assert_equal(list_of_chars(None), None)
assert_equal(list_of_chars(['']), [''])
assert_equal(list_of_chars(
['f', 'o', 'o', ' ', 'b', 'a', 'r']),
['r', 'a', 'b', ' ', 'o', 'o', 'f'])
print('Success: test_reverse')
def main():
test = TestReverse()
test.test_reverse()
if __name__ == '__main__':
main()
"""
Explanation: Unit Test
The following unit test is expected to fail until you solve the challenge.
End of explanation
"""
|
ioam/scipy-2017-holoviews-tutorial
|
notebooks/01-introduction-to-elements.ipynb
|
bsd-3-clause
|
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh')
"""
Explanation: <a href='http://www.holoviews.org'><img src="assets/hv+bk.png" alt="HV+BK logos" width="40%;" align="left"/></a>
<div style="float:right;"><h2>01. Introduction to Elements</h2></div>
Preliminaries
If the hvtutorial environment has been correctly created and activated using the instructions listed on the welcome page, the following imports should run and hv.extension('bokeh') should present a small HoloViews logo:
End of explanation
"""
xs = [i for i in range(-10,11)]
ys = [100-(x**2) for x in xs]
simple_curve = hv.Curve((xs,ys))
simple_curve
"""
Explanation: Here we import the NumPy and pandas data libraries with their standard abbreviations, plus HoloViews with its standard abbreviation hv. The line reading hv.extension('bokeh') loads and activates the bokeh plotting backend, so all visualizations will be generated using Bokeh. We will see how to use matplotlib instead of bokeh later in the tutorial Customizing Visual Appearance.
What are elements?
In short, elements are HoloViews' most basic, core primitives. All the various types of hv.Element accept semantic metadata that allows their input data to be given an automatic, visual representation. Most importantly, element objects always preserve the raw data they are supplied.
In this notebook we will explore a number of different element types and examine some of the ways that elements can supplement the supplied data with useful semantic data. To choose your own types to use in the exercises, you can browse them all in the reference gallery.
Creating elements
All basic elements accept their data as a single, mandatory positional argument which may be supplied in a number of different formats, some of which we will now examine. A handful of annotation elements are exceptions to this rule, namely Arrow, Text, Bounds, Box and Ellipse, as they require additional positional arguments.
A simple curve
To start with a simple example, we will sample a quadratic function $y=100-x^2$ at 21 different values of $x$ and wrap that data in a HoloViews element:
End of explanation
"""
print(simple_curve)
"""
Explanation: Here we supplied two lists of values as a tuple to [hv.Curve](http://build.holoviews.org/reference/elements/bokeh/Curve.html), assigned the result to the name simple_curve, and let Jupyter display the object using its default visual representation. As you can see, that default visual representation is a Bokeh plot, which is automatically generated by HoloViews when Jupyter requests it. But simple_curve itself is just a wrapper around your data, not a plot, and you can choose other representations that are not plots. For instance, printing the object will give you a purely textual representation instead:
End of explanation
"""
#simple_curve.data
"""
Explanation: The textual representation indicates that this object is a continuous mapping from x to y, which is how HoloViews knew to render it as a continuous curve. You can also access the full original data if you wish:
End of explanation
"""
# Exercise: Try switching hv.Curve with hv.Area and hv.Scatter
# Optional:
# Look at the .data attribute of the elements you created to see the raw data (as a pandas DataFrame)
"""
Explanation: If you uncomment that line, you should see the original data values, though in some cases like this one the data has been converted to a better format (a Pandas dataframe instead of Python lists).
There are a number of similar elements to Curve such as Area and Scatter, which you can try out for yourself in the exercises.
End of explanation
"""
trajectory = hv.Curve((xs,ys), kdims=['distance'], vdims=['height'])
trajectory
"""
Explanation: Annotating the curve
Wrapping your data (xs and ys) here as a HoloViews element is sufficient to make it visualizable, but there are many other aspects of the data that we can capture to convey more about its meaning to HoloViews. For instance, we might want to specify what the x-axis and y-axis actually correspond to, in the real world. Perhaps this parabola is the trajectory of a ball thrown into the air, in which case we could declare the object as:
End of explanation
"""
# Exercise: Take a look at trajectory.vdims
"""
Explanation: Here we have added semantic information about our data to the Curve element. Specifically, we told HoloViews that the kdim or key dimension of our data corresponds to the real-world independent variable ('distance'), and the vdim or value dimension 'height' is the real-world dependent variable. Even though the additional information we provided is about the data, not directly about the plot, HoloViews is designed to reveal the properties of your data accurately, and so the axes now update to show what these dimensions represent.
End of explanation
"""
hv.Scatter(simple_curve)
"""
Explanation: Casting between elements
The type of an element is a declaration of important facts about your data, which gives HoloViews the appropriate hint required to generate a suitable visual representation from it. For instance, calling it a Curve is a declaration from the user that the data consists of samples from an underlying continuous function, which is why HoloViews plots it as a connected object. If we convert to an hv.Scatter object instead, the same set of data will show up as separated points, because "Scatter" does not make an assumption that the data is meant to be continuous:
End of explanation
"""
# How do you predict the representation for hv.Scatter(trajectory) will differ from
# hv.Scatter(simple_curve) above? Try it!
# Also try casting the trajectory to an area then back to a curve.
"""
Explanation: Casting the same data between different Element types in this way is often useful as a way to see your data differently, particularly if you are not certain of a single best way to interpret the data. Casting preserves your declared metadata as much as possible, propagating your declarations from the original object to the new one.
End of explanation
"""
x = np.linspace(0, 10, 500)
y = np.linspace(0, 10, 500)
xx, yy = np.meshgrid(x, y)
arr = np.sin(xx)*np.cos(yy)
image = hv.Image(arr)
"""
Explanation: Turning arrays into elements
The curve above was constructed from a list of x-values and a list of y-values. Next we will create an element using an entirely different datatype, namely a NumPy array:
End of explanation
"""
image
# Exercise: Try visualizing different two-dimensional arrays.
# You can try a new function entirely or simple modifications of the existing one
# E.g., explore the effect of squaring and cubing the sine and cosine terms
# Optional: Try supplying appropriate labels for the x- and y- axes
# Hint: The x,y positions are how you *index* (or key) the array *values* (so x and y are both kdims)
"""
Explanation: As above, we know that this data was sampled from a continuous function, but this time the data is mapping from two key dimensions, so we declare it as an [hv.Image](http://build.holoviews.org/reference/elements/bokeh/Image.html) object. As you might expect, an Image object is visualized as an image by default:
End of explanation
"""
economic_data = pd.read_csv('../data/macro.csv')
economic_data.tail()
"""
Explanation: Selecting columns from tables to make elements
In addition to basic Python datatypes and xarray and NumPy array types, HoloViews elements can be passed tabular data in the form of pandas DataFrames:
End of explanation
"""
US_data = economic_data[economic_data['country'] == 'United States'] # Select data for the US only
US_data.tail()
growth_curve = hv.Curve(US_data, kdims=['year'], vdims=['growth'])
growth_curve
"""
Explanation: Let's build an element that helps us understand how the percentage growth in US GDP varies over time. As our dataframe contains GDP growth data for lots of countries, let us select the United States from the table and create a Curve element from it:
End of explanation
"""
# Exercise: Plot the unemployment (unem) over year
"""
Explanation: In this case, declaring the kdims and vdims does not simply declare the axis labels, it allows HoloViews to discover which columns of the data should be used from the dataframe for each of the axes.
End of explanation
"""
gdp_growth = growth_curve.redim.label(growth='GDP growth')
gdp_growth
"""
Explanation: Dimension labels
In this example, the simplistic axis labels are starting to get rather limiting. Changing the kdims and vdims is no longer trivial either, as they need to match the column names in the dataframe. Is the only solution to rename the columns in our dataframe to something more descriptive but more awkward to type?
Luckily, no. The recommendation is that you continue to use short, programmer- and pandas-friendly, tab-completable column names as these are also the most convenient dimension names to use with HoloViews.
What you should do instead is set the dimension labels, using the fact that dimensions are full, rich objects behind the scenes:
End of explanation
"""
gdp_growth.vdims
# Exercise: Use redim.label to give the year dimension a better label
"""
Explanation: With the redim method, we have associated a dimension label with the growth dimension, resulting in a new element called gdp_growth (you can check for yourself that growth_curve is unchanged). Let's look at what the new dimension contains:
End of explanation
"""
gdp_growth.redim.unit(growth='%')
# Exercise: Use redim.unit to give the year dimension a better unit
# For instance, relabel to 'Time' then give the unit as 'year'
"""
Explanation: The redim utility lets you easily change other dimension parameters, and as an example let's give our GDP growth dimension the appropriate unit:
End of explanation
"""
layout = trajectory + hv.Scatter(trajectory) + hv.Area(trajectory) + hv.Spikes(trajectory)
layout.cols(2)
"""
Explanation: Composing elements together
Viewing a single element at a time often conveys very little information for the space used. In this section, we introduce the two composition operators + and * to build Layout and Overlay objects.
Layouts
Earlier on we were casting a parabola to different element types. Viewing the different types was awkward, wasting lots of vertical space in the notebook. What we will often want to do is view these elements side by side:
End of explanation
"""
print(layout)
"""
Explanation: What we have created with the + operator is an hv.Layout object (with a hint that a two-column layout is desired):
End of explanation
"""
layout.Curve.I + layout.Spikes.I
"""
Explanation: Now let us build a new layout by selecting elements from layout:
End of explanation
"""
cannonball = trajectory.relabel('Cannonball', group='Trajectory')
integral = hv.Area(trajectory).relabel('Filled', group='Trajectory')
labelled_layout = cannonball + integral
labelled_layout
# Exercise: Try out the tab-completion of labelled_layout to build a new layout swapping the position of these elements
# Optional: Try using two levels of dictionary-style access to grab the cannonball trajectory
"""
Explanation: We see that a Layout lets us pick component elements via two levels of tab-completable attribute access. Note that by default the type of the element defines the first level of access and the second level of access automatically uses Roman numerals (because Python identifiers cannot start with numbers).
These two levels correspond to another type of semantic declaration that applies to the elements directly (rather than their dimensions), called group and label. Specifically, group allows you to declare what kind of thing this object is, while label allows you to label which specific object it is. What you put in those declarations, if anything, will form the title of the plot:
End of explanation
"""
trajectory * hv.Spikes(trajectory)
"""
Explanation: Overlays
Layout places objects side by side, allowing it to collect (almost!) any HoloViews objects that you want to indicate are related. Another operator * allows you to overlay elements into a single plot, if they live in the same space (with matching dimensions and similar ranges over those dimensions). The result of * is an Overlay:
End of explanation
"""
# Exercise: Make an overlay of the Spikes object from layout on top of the filled trajectory area of labelled_layout
"""
Explanation: The indexing system of Overlay is identical to that of Layout.
End of explanation
"""
tennis_ball = cannonball.clone((xs, 0.5*np.array(ys)), label='Tennis Ball')
cannonball + tennis_ball + (cannonball * tennis_ball)
"""
Explanation: One thing that is specific to Overlays is the use of color cycles to automatically differentiate between elements of the same type and group:
End of explanation
"""
# Optional Exercise:
# 1. Create a thrown_ball curve with half the height of tennis_ball by cloning it and assigning the label 'Thrown ball'
# 2. Add thrown_ball to the overlay
"""
Explanation: Here we use the clone method to make a shallower tennis-ball trajectory: the clone method creates a new object that preserves semantic metadata while allowing overrides (in this case we override the input data and the label).
As you can see, HoloViews can determine that the two overlaid curves will be distinguished by color, and so it also provides a legend so that the mapping from color to data is clear.
End of explanation
"""
full_trajectory = cannonball.redim.label(distance='Horizontal distance', height='Vertical height')
ascending = full_trajectory[-10:1].relabel('ascending')
descending = cannonball.select(distance=(0,11.)).relabel('descending')
ascending * descending
"""
Explanation: Slicing and selecting
HoloViews elements can be easily sliced using array-style syntax or using the .select method. The following example shows how we can slice the cannonball trajectory into its ascending and descending components:
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/11dfe5b16c319f3332711a4e798a0cef/plot_stats_cluster_time_frequency.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
"""
Explanation: Non-parametric between conditions cluster statistic on single trial power
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
extracting epochs for the two conditions
computing single-trial power estimates
baseline-correcting the power estimates (power ratios)
computing statistics to see if the power estimates are significantly different
between conditions.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
"""
Explanation: Set parameters
End of explanation
"""
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
"""
Explanation: Factor to downsample the temporal dimension of the TFR computed by
tfr_morlet. Decimation occurs after frequency decomposition and can
be used to reduce memory usage (and possibly computational time of downstream
operations such as nonparametric statistics) if you don't need high
spectrotemporal resolution.
End of explanation
"""
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2],
n_permutations=100, threshold=threshold, tail=0)
"""
Explanation: Compute statistic
End of explanation
"""
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
"""
Explanation: View time-frequency plots
End of explanation
"""
|
tpin3694/tpin3694.github.io
|
machine-learning/logistic_regression_with_l1_regularization.ipynb
|
mit
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
"""
Explanation: Title: Logistic Regression With L1 Regularization
Slug: logistic_regression_with_l1_regularization
Summary: Logistic Regression With L1 Regularization using scikit-learn.
Date: 2016-12-01 12:00
Category: Machine Learning
Tags: Logistic Regression
Authors: Chris Albon
L1 regularization (also called least absolute deviations) is a powerful tool in data science. There are many tutorials out there explaining L1 regularization and I will not try to do that here. Instead, this tutorial shows the effect of the regularization parameter C on the coefficients and model accuracy.
Preliminaries
End of explanation
"""
# Load the iris dataset
iris = datasets.load_iris()
# Create X from the features
X = iris.data
# Create y from output
y = iris.target
# Remake the variable, keeping all data where the category is not 2.
X = X[y != 2]
y = y[y != 2]
"""
Explanation: Create The Data
The dataset used in this tutorial is the famous iris dataset. It contains 150 samples (50 from each of three species of Iris) in the target data, y, along with four feature variables, X.
The dataset therefore contains three categories (three species of Iris); however, for the sake of simplicity it is easier if the target data is binary, so we will remove the data for the last species of Iris.
End of explanation
"""
# View the features
X[0:5]
# View the target data
y
"""
Explanation: View The Data
End of explanation
"""
# Split the data into test and training sets, with 30% of samples being put into the test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
"""
Explanation: Split The Data Into Training And Test Sets
End of explanation
"""
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the training data and transform
X_train_std = sc.fit_transform(X_train)
# Apply the scaler to the test data
X_test_std = sc.transform(X_test)
"""
Explanation: Standardize Features
Because the regularization penalty is comprised of the sum of the absolute value of the coefficients, we need to scale the data so the coefficients are all based on the same scale.
End of explanation
"""
C = [10, 1, .1, .001]
for c in C:
    # use the standardized features and a solver that supports the L1 penalty
    clf = LogisticRegression(penalty='l1', C=c, solver='liblinear')
    clf.fit(X_train_std, y_train)
    print('C:', c)
    print('Coefficient of each feature:', clf.coef_)
    print('Training accuracy:', clf.score(X_train_std, y_train))
    print('Test accuracy:', clf.score(X_test_std, y_test))
    print('')
"""
Explanation: Run Logistic Regression With A L1 Penalty With Various Regularization Strengths
The usefulness of L1 is that it can push feature coefficients to 0, creating a method for feature selection. In the code below we run a logistic regression with a L1 penalty four times, each time decreasing the value of C. We should expect that as C decreases, more coefficients become 0.
End of explanation
"""
|
linan7788626/tutmom
|
intro.ipynb
|
bsd-3-clause
|
import numpy as np
objective = np.poly1d([1.3, 4.0, 0.6])
print(objective)
"""
Explanation: Introduction to optimization
The basic components
The objective function (also called the 'cost' function)
End of explanation
"""
import scipy.optimize as opt
x_ = opt.fmin(objective, [3])
print "solved: x={}".format(x_)
%matplotlib inline
x = np.linspace(-4,1,101.)
import matplotlib.pylab as mpl
mpl.plot(x, objective(x))
mpl.plot(x_, objective(x_), 'ro')
"""
Explanation: The "optimizer"
End of explanation
"""
import scipy.special as ss
import scipy.optimize as opt
import numpy as np
import matplotlib.pylab as mpl
x = np.linspace(2, 7, 200)
# 1st order Bessel
j1x = ss.j1(x)
mpl.plot(x, j1x)
# use scipy.optimize's more modern "results object" interface
result = opt.minimize_scalar(ss.j1, method="bounded", bounds=[2, 4])
j1_min = ss.j1(result.x)
mpl.plot(result.x, j1_min,'ro')
"""
Explanation: Additional components
"Box" constraints
End of explanation
"""
import mystic.models as models
print(models.rosen.__doc__)
!mystic_model_plotter.py mystic.models.rosen -f -d -x 1 -b "-3:3:.1, -1:5:.1, 1"
import mystic
mystic.model_plotter(mystic.models.rosen, fill=True, depth=True, scale=1, bounds="-3:3:.1, -1:5:.1, 1")
import scipy.optimize as opt
import numpy as np
# initial guess
x0 = [1.3, 1.6, -0.5, -1.8, 0.8]
result = opt.minimize(opt.rosen, x0)
print(result.x)
# number of function evaluations
print(result.nfev)
# again, but this time provide the derivative
result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der)
print(result.x)
# number of function evaluations and derivative evaluations
print(result.nfev, result.njev)
print('')
# however, note for a different x0...
for i in range(5):
    x0 = np.random.randint(-20, 20, 5)
    result = opt.minimize(opt.rosen, x0, jac=opt.rosen_der)
    print("{} @ {} evals".format(result.x, result.nfev))
"""
Explanation: The gradient and/or hessian
End of explanation
"""
# http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html#tutorial-sqlsp
'''
Maximize: f(x) = 2*x0*x1 + 2*x0 - x0**2 - 2*x1**2
Subject to: x0**3 - x1 == 0
x1 >= 1
'''
import numpy as np
def objective(x, sign=1.0):
return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
def derivative(x, sign=1.0):
dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
dfdx1 = sign*(2*x[0] - 4*x[1])
return np.array([ dfdx0, dfdx1 ])
# unconstrained
result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,),
jac=derivative, method='SLSQP', options={'disp': True})
print("unconstrained: {}".format(result.x))
cons = ({'type': 'eq',
'fun' : lambda x: np.array([x[0]**3 - x[1]]),
'jac' : lambda x: np.array([3.0*(x[0]**2.0), -1.0])},
{'type': 'ineq',
'fun' : lambda x: np.array([x[1] - 1]),
'jac' : lambda x: np.array([0.0, 1.0])})
# constrained
result = opt.minimize(objective, [-1.0,1.0], args=(-1.0,), jac=derivative,
constraints=cons, method='SLSQP', options={'disp': True})
print("constrained: {}".format(result.x))
"""
Explanation: The penalty functions
$\psi(x) = f(x) + k*p(x)$
End of explanation
"""
# from scipy.optimize.minimize documentation
'''
**Unconstrained minimization**
Method *Nelder-Mead* uses the Simplex algorithm [1]_, [2]_. This
algorithm has been successful in many applications but other algorithms
using the first and/or second derivatives information might be preferred
for their better performances and robustness in general.
Method *Powell* is a modification of Powell's method [3]_, [4]_ which
is a conjugate direction method. It performs sequential one-dimensional
minimizations along each vector of the directions set (`direc` field in
`options` and `info`), which is updated at each iteration of the main
minimization loop. The function need not be differentiable, and no
derivatives are taken.
Method *CG* uses a nonlinear conjugate gradient algorithm by Polak and
Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp.
120-122. Only the first derivatives are used.
Method *BFGS* uses the quasi-Newton method of Broyden, Fletcher,
Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives
only. BFGS has proven good performance even for non-smooth
optimizations. This method also returns an approximation of the Hessian
inverse, stored as `hess_inv` in the OptimizeResult object.
Method *Newton-CG* uses a Newton-CG algorithm [5]_ pp. 168 (also known
as the truncated Newton method). It uses a CG method to the compute the
search direction. See also *TNC* method for a box-constrained
minimization with a similar algorithm.
Method *Anneal* uses simulated annealing, which is a probabilistic
metaheuristic algorithm for global optimization. It uses no derivative
information from the function being optimized.
Method *dogleg* uses the dog-leg trust-region algorithm [5]_
for unconstrained minimization. This algorithm requires the gradient
and Hessian; furthermore the Hessian is required to be positive definite.
Method *trust-ncg* uses the Newton conjugate gradient trust-region
algorithm [5]_ for unconstrained minimization. This algorithm requires
the gradient and either the Hessian or a function that computes the
product of the Hessian with a given vector.
**Constrained minimization**
Method *L-BFGS-B* uses the L-BFGS-B algorithm [6]_, [7]_ for bound
constrained minimization.
Method *TNC* uses a truncated Newton algorithm [5]_, [8]_ to minimize a
function with variables subject to bounds. This algorithm uses
gradient information; it is also called Newton Conjugate-Gradient. It
differs from the *Newton-CG* method described above as it wraps a C
implementation and allows each variable to be given upper and lower
bounds.
Method *COBYLA* uses the Constrained Optimization BY Linear
Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is
based on linear approximations to the objective function and each
constraint. The method wraps a FORTRAN implementation of the algorithm.
Method *SLSQP* uses Sequential Least SQuares Programming to minimize a
function of several variables with any combination of bounds, equality
and inequality constraints. The method wraps the SLSQP Optimization
subroutine originally implemented by Dieter Kraft [12]_. Note that the
wrapper handles infinite values in bounds by converting them into large
floating values.
'''
import scipy.optimize as opt
# constrained: linear (i.e. A*x + b)
print(opt.cobyla.fmin_cobyla)
print(opt.linprog)
# constrained: quadratic programming (i.e. up to x**2)
print(opt.fmin_slsqp)
# http://cvxopt.org/examples/tutorial/lp.html
'''
minimize: f = 2*x0 + x1
subject to:
-x0 + x1 <= 1
x0 + x1 >= 2
x1 >= 0
x0 - 2*x1 <= 4
'''
import cvxopt as cvx
from cvxopt import solvers as cvx_solvers
A = cvx.matrix([ [-1.0, -1.0, 0.0, 1.0], [1.0, -1.0, -1.0, -2.0] ])
b = cvx.matrix([ 1.0, -2.0, 0.0, 4.0 ])
cost = cvx.matrix([ 2.0, 1.0 ])
sol = cvx_solvers.lp(cost, A, b)
print(sol['x'])
# http://cvxopt.org/examples/tutorial/qp.html
'''
minimize: f = 2*x1**2 + x2**2 + x1*x2 + x1 + x2
subject to:
x1 >= 0
x2 >= 0
x1 + x2 == 1
'''
import cvxopt as cvx
from cvxopt import solvers as cvx_solvers
Q = 2*cvx.matrix([ [2, .5], [.5, 1] ])
p = cvx.matrix([1.0, 1.0])
G = cvx.matrix([[-1.0,0.0],[0.0,-1.0]])
h = cvx.matrix([0.0,0.0])
A = cvx.matrix([1.0, 1.0], (1,2))
b = cvx.matrix(1.0)
sol = cvx_solvers.qp(Q, p, G, h, A, b)
print(sol['x'])
"""
Explanation: Optimizer classifications
Constrained versus unconstrained (and importantly LP and QP)
End of explanation
"""
import scipy.optimize as opt
# probabilistic solvers that use random hopping/mutations
print(opt.differential_evolution)
print(opt.basinhopping)
print(opt.anneal)  # note: removed in newer SciPy releases
import scipy.optimize as opt
# bounds instead of an initial guess
bounds = [(-10., 10)]*5
for i in range(10):
    result = opt.differential_evolution(opt.rosen, bounds)
    # solution and number of function evaluations
    print('{} @ {} evals'.format(result.x, result.nfev))
"""
Explanation: Notice how much nicer it is to see the optimizer "trajectory". Now, instead of a single number, we have the path the optimizer took. scipy.optimize has a version of this, with options={'retall':True}, which returns the solver trajectory.
EXERCISE: Solve the constrained programming problem by any of the means above.
Minimize: f = -1*x[0] + 4*x[1]
Subject to: -3*x[0] + 1*x[1] <= 6
1*x[0] + 2*x[1] <= 4
x[1] >= -3
where: -inf <= x[0] <= inf
Local versus global
End of explanation
"""
import scipy.optimize as opt
import scipy.stats as stats
import numpy as np
# Define the function to fit.
def function(x, a, b, f, phi):
result = a * np.exp(-b * np.sin(f * x + phi))
return result
# Create a noisy data set around the actual parameters
true_params = [3, 2, 1, np.pi/4]
print "target parameters: {}".format(true_params)
x = np.linspace(0, 2*np.pi, 25)
exact = function(x, *true_params)
noisy = exact + 0.3*stats.norm.rvs(size=len(x))
# Use curve_fit to estimate the function parameters from the noisy data.
initial_guess = [1,1,1,1]
estimated_params, err_est = opt.curve_fit(function, x, noisy, p0=initial_guess)
print "solved parameters: {}".format(estimated_params)
# err_est is an estimate of the covariance matrix of the estimates
print "covarance: {}".format(err_est.diagonal())
import matplotlib.pylab as mpl
mpl.plot(x, noisy, 'ro')
mpl.plot(x, function(x, *estimated_params))
"""
Explanation: Gradient descent and steepest descent
Genetic and stochastic
Not covered: other exotic types
Other important special cases:
Least-squares fitting
End of explanation
"""
import numpy as np
import scipy.optimize as opt
def system(x,a,b,c):
x0, x1, x2 = x
eqs= [
3 * x0 - np.cos(x1*x2) + a, # == 0
x0**2 - 81*(x1+0.1)**2 + np.sin(x2) + b, # == 0
np.exp(-x0*x1) + 20*x2 + c # == 0
]
return eqs
# coefficients
a = -0.5
b = 1.06
c = (10 * np.pi - 3.0) / 3
# initial guess
x0 = [0.1, 0.1, -0.1]
# Solve the system of non-linear equations.
result = opt.root(system, x0, args=(a, b, c))
print "root:", result.x
print "solution:", result.fun
"""
Explanation: Not Covered: integer programming
Typical uses
Function minimization
Data fitting
Root finding
End of explanation
"""
import numpy as np
import scipy.stats as stats
# Create clean data.
x = np.linspace(0, 4.0, 100)
y = 1.5 * np.exp(-0.2 * x) + 0.3
# Add a bit of noise.
noise = 0.1 * stats.norm.rvs(size=100)
noisy_y = y + noise
# Fit noisy data with a linear model.
linear_coef = np.polyfit(x, noisy_y, 1)
linear_poly = np.poly1d(linear_coef)
linear_y = linear_poly(x)
# Fit noisy data with a quadratic model.
quad_coef = np.polyfit(x, noisy_y, 2)
quad_poly = np.poly1d(quad_coef)
quad_y = quad_poly(x)
import matplotlib.pylab as mpl
mpl.plot(x, noisy_y, 'ro')
mpl.plot(x, linear_y)
mpl.plot(x, quad_y)
#mpl.plot(x, y)
"""
Explanation: Parameter estimation
End of explanation
"""
import mystic.models as models
print(models.zimmermann.__doc__)
"""
Explanation: Standard diagnostic tools
Eyeball the plotted solution against the objective
Run several times and take the best result
Log of intermediate results, per iteration
Rare: look at the covariance matrix
Issue: how can you really be sure you have the results you were looking for?
EXERCISE: Use any of the solvers we've seen thus far to find the minimum of the zimmermann function (i.e. use mystic.models.zimmermann as the objective). Use the bounds suggested below, if your choice of solver allows it.
End of explanation
"""
|
regardscitoyens/consultation_an
|
exploitation/analyse_quanti_theme1.ipynb
|
agpl-3.0
|
def loadContributions(file, withsexe=False):
contributions = pd.read_json(path_or_buf=file, orient="columns")
rows = [];
rindex = [];
for i in range(0, contributions.shape[0]):
row = {};
row['id'] = contributions['id'][i]
rindex.append(contributions['id'][i])
if (withsexe):
if (contributions['sexe'][i] == 'Homme'):
row['sexe'] = 0
else:
row['sexe'] = 1
for question in contributions['questions'][i]:
if (question.get('Reponse')): # and (question['texte'][0:5] != 'Savez') :
row[question['titreQuestion']+' : '+question['texte']] = 1
for criteres in question.get('Reponse'):
# print(criteres['critere'].keys())
row[question['titreQuestion']+'. (Réponse) '+question['texte']+' -> '+str(criteres['critere'].get('texte'))] = 1
rows.append(row)
df = pd.DataFrame(data=rows)
df.fillna(0, inplace=True)
return df
df = loadContributions('../data/EGALITE1.brut.json', True)
df.fillna(0, inplace=True)
df.index = df['id']
df.head()
"""
Explanation: Reading the data
End of explanation
"""
from sklearn.cluster import KMeans
from sklearn import metrics
import numpy as np
X = df.drop('id', axis=1).values
def train_kmeans(nb_clusters, X):
kmeans = KMeans(n_clusters=nb_clusters, random_state=0).fit(X)
return kmeans
#print(kmeans.predict(X))
#kmeans.cluster_centers_
def select_nb_clusters():
perfs = {};
for nbclust in range(2,10):
kmeans_model = train_kmeans(nbclust, X);
labels = kmeans_model.labels_
# from http://scikit-learn.org/stable/modules/clustering.html#calinski-harabaz-index
# we are in an unsupervised model. cannot get better!
# perfs[nbclust] = metrics.calinski_harabaz_score(X, labels);
perfs[nbclust] = metrics.silhouette_score(X, labels);
print(perfs);
return perfs;
df['clusterindex'] = train_kmeans(4, X).predict(X)
#df
perfs = select_nb_clusters();
# result :
# {2: 341.07570462155348, 3: 227.39963334619881, 4: 186.90438345452918, 5: 151.03979976346525, 6: 129.11214073405731, 7: 112.37235520885432, 8: 102.35994869157568, 9: 93.848315820675438}
optimal_nb_clusters = max(perfs, key=perfs.get);
print("optimal_nb_clusters" , optimal_nb_clusters);
"""
Explanation: Build clustering model
Here we build a kmeans model , and select the "optimal" of clusters.
Here we see that the optimal number of clusters is 2.
End of explanation
"""
km_model = train_kmeans(optimal_nb_clusters, X);
df['clusterindex'] = km_model.predict(X)
lGroupBy = df.groupby(['clusterindex']).mean();
# km_model.__dict__
cluster_profile_counts = df.groupby(['clusterindex']).count();
cluster_profile_means = df.groupby(['clusterindex']).mean();
global_counts = df.count()
global_means = df.mean()
cluster_profile_counts.head()
#cluster_profile_means.head()
#df.info()
df_profiles = pd.DataFrame();
nbclusters = cluster_profile_means.shape[0]
df_profiles['clusterindex'] = range(nbclusters)
for col in cluster_profile_means.columns:
if(col != "clusterindex"):
df_profiles[col] = np.zeros(nbclusters)
for cluster in range(nbclusters):
df_profiles[col][cluster] = cluster_profile_means[col][cluster]
# row.append(df[col].mean());
df_profiles.head()
#print(df_profiles.columns)
intereseting_columns = {};
for col in df_profiles.columns:
if(col != "clusterindex"):
global_mean = df[col].mean()
diff_means_global = abs(df_profiles[col] - global_mean). max();
# print(col , diff_means_global)
if(diff_means_global > 0.1):
intereseting_columns[col] = True
#print(intereseting_columns)
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Build the optimal model and apply it
End of explanation
"""
interesting = list(intereseting_columns.keys())
df_profiles_sorted = df_profiles[interesting].sort_index(axis=1)
df_profiles_sorted.plot.bar(figsize =(1, 1))
df_profiles_sorted.plot.bar(figsize =(16, 8), legend=False)
df_profiles_sorted.T
#df_profiles.sort_index(axis=1).T
"""
Explanation: Cluster Profiles
Here, the optimal model has two clusters: cluster 0 with 399 cases and cluster 1 with 537 cases.
As this model is based on binary inputs, the best description of the clusters is the distribution of zeros and ones for each input (question).
The figure below gives the cluster profiles of this model, cluster 0 on the left and cluster 1 on the right. The questions involved are different (highest bars).
End of explanation
"""
|
leriomaggio/code-coherence-analysis
|
Benchmark Data.ipynb
|
bsd-3-clause
|
%load preamble_directives.py
"""
Explanation: Benchmark Creation
Notebook to create the report file to export Benchmark data (to be released)
Note: this notebook assumes the use of Python 3
Preamble: Setting up the Django Environment
End of explanation
"""
from source_code_analysis.models import SoftwareProject
projects = SoftwareProject.objects.all()
"""
Explanation: Benchmark Report (per Project)
End of explanation
"""
# Write Coherence Report
def write_coherence_report(coherence_report_filepath, target_methods):
with open(coherence_report_filepath, 'w') as coherence_report:
for method in target_methods:
evaluation = method.agreement_evaluations.all()[0]
coherence_value = 'COHERENT' if evaluation.agreement_vote in [3, 4] else 'NOT_COHERENT'
coherence_report.write('{0}, {1}\n'.format(method.pk, coherence_value))
"""
Explanation: The Replication Dataset contains the following report files:
(Benchmark_Coherence_Data.txt, Benchmark_Raw_Data.txt):
Report files containing information about the Coherence and the raw data of methods from all the 4 considered
Software Systems.
(CoffeeMaker_Coherence_Data.txt, CoffeeMaker_Raw_Data.txt):
Report files providing the Coherence and the raw data of methods gathered from the CoffeeMaker Software System.
(JFreeChart060_Coherence_Data.txt, JFreeChart060_Raw_Data.txt):
Report files providing the Coherence and the raw data of methods gathered from the JFreeChart 0.6.0 Software System.
(JFreeChart071_Coherence_Data.txt, JFreeChart071_Raw_Data.txt):
Report files providing the Coherence and the raw data of methods gathered from the JFreeChart 0.7.1 Software System.
(JHotDraw741_Coherence_Data.txt, JHotDraw741_Raw_Data.txt):
Report files providing the Coherence and the raw data of methods gathered from the JHotDraw 7.4.1 Software System.
Coherence Data Report Structure
Report files providing information about the Coherence of methods are structured according to the CSV
(i.e., Comma Separated Values) format.
Each line of the file contains the following information:
method_id, coherence
method_id: the unique identifier of the corresponding method
coherence : the coherence value associated to the comment and the implementation of the referred method.
Allowed Coherence Values are: NOT_COHERENT and COHERENT.
If needed, it would be straightforward to translate these values into 0 and 1, respectively.
End of explanation
"""
# Write Raw Data Report
def write_raw_data_report(raw_report_filepath, target_methods):
with open(raw_report_filepath, 'w') as raw_report:
for method in target_methods:
software_system_name = method.project.name + method.project.version.replace('.', '')
raw_report.write('{mid}, {method_name}, {class_name}, {software_system}\n'.format(
mid=method.id, method_name=method.method_name, class_name=method.code_class.class_name,
software_system=software_system_name))
method_fp = method.file_path
relative_filepath = method_fp[method_fp.find('extracted')+len('extracted')+1:]
raw_report.write('{filepath}, {start_line}, {end_line}\n'.format(filepath=relative_filepath,
start_line=method.start_line,
end_line=method.end_line))
raw_report.write('{comment_len}\n'.format(comment_len=len(method.comment.splitlines())))
raw_report.write('{comment}'.format(comment=method.comment))
if not method.comment.endswith('\n'):
raw_report.write('\n')
raw_report.write('{code_len}\n'.format(code_len=len(method.code_fragment.splitlines())))
raw_report.write('{code}'.format(code=method.code_fragment))
if not method.code_fragment.endswith('\n'):
raw_report.write('\n')
# Last Line of this method
raw_report.write('###\n')
RAW_DATA_SUFFIX = 'Raw_Data.txt'
COHERENCE_DATA_SUFFIX = 'Coherence_Data.txt'
import os
# Create Report Folder
report_folderpath = os.path.join(os.path.abspath(os.path.curdir), 'report_files')
if not os.path.exists(report_folderpath):
os.makedirs(report_folderpath)
all_methods_list = list()
# Project-Specific Reports
for project in projects:
software_system_name = project.name + project.version.replace('.', '')
target_methods = list()
project_methods = project.code_methods.order_by('pk')
# Collect Project Methods whose evaluations are Coherent|Not Coherent
for method in project_methods:
evaluation = method.agreement_evaluations.all()[0]
if not evaluation.wrong_association and evaluation.agreement_vote != 2:
target_methods.append(method)
all_methods_list.extend(target_methods)
# Coherence Data Report
coherence_report_filename = '{0}_{1}'.format(software_system_name, COHERENCE_DATA_SUFFIX)
coherence_report_filepath = os.path.join(report_folderpath, coherence_report_filename)
write_coherence_report(coherence_report_filepath, target_methods)
# Raw Data Report
raw_report_filename = '{0}_{1}'.format(software_system_name, RAW_DATA_SUFFIX)
raw_report_filepath = os.path.join(report_folderpath, raw_report_filename)
write_raw_data_report(raw_report_filepath, target_methods)
# -- Entire Benchmark Reports
# Coherence Data Report
coherence_report_filename = '{0}_{1}'.format('Benchmark', COHERENCE_DATA_SUFFIX)
coherence_report_filepath = os.path.join(report_folderpath, coherence_report_filename)
write_coherence_report(coherence_report_filepath, all_methods_list)
# Raw Data Report
raw_report_filename = '{0}_{1}'.format('Benchmark', RAW_DATA_SUFFIX)
raw_report_filepath = os.path.join(report_folderpath, raw_report_filename)
write_raw_data_report(raw_report_filepath, all_methods_list)
"""
Explanation: Raw Data Report Structure
All the report files containing the raw data of the methods share exactly the same multiline structure.
That is (for each method):
method_id, method_name, class_name, software_system
filepath, start_line, end_line,
Length of the Head Comments
Head Comment
Length of the Implementation
Method Implementation
###
End of explanation
"""
|
QuantStack/quantstack-talks
|
2019-07-10-CICM/src/notebooks/DrawControl.ipynb
|
bsd-3-clause
|
dc = DrawControl(marker={'shapeOptions': {'color': '#0000FF'}},
rectangle={'shapeOptions': {'color': '#0000FF'}},
circle={'shapeOptions': {'color': '#0000FF'}},
circlemarker={},
)
def handle_draw(self, action, geo_json):
print(action)
print(geo_json)
dc.on_draw(handle_draw)
m.add_control(dc)
"""
Explanation: Now create the DrawControl and add it to the Map using add_control. We also register a handler for draw events. This will fire when a drawn path is created, edited or deleted (these are the actions). The geo_json argument is the serialized geometry of the drawn path, along with its embedded style.
End of explanation
"""
dc.last_action
dc.last_draw
"""
Explanation: In addition, the DrawControl also has last_action and last_draw attributes that are created dynamically anytime a new drawn path arrives.
End of explanation
"""
dc.clear_circles()
dc.clear_polylines()
dc.clear_rectangles()
dc.clear_markers()
dc.clear_polygons()
dc.clear()
"""
Explanation: It's possible to remove all drawings from the map
End of explanation
"""
m2 = Map(center=center, zoom=zoom, layout=dict(width='600px', height='400px'))
m2
"""
Explanation: Let's draw a second map and try to import this GeoJSON data into it.
End of explanation
"""
map_center_link = link((m, 'center'), (m2, 'center'))
map_zoom_link = link((m, 'zoom'), (m2, 'zoom'))
new_poly = GeoJSON(data=dc.last_draw)
m2.add_layer(new_poly)
"""
Explanation: We can use link to synchronize traitlets of the two maps:
End of explanation
"""
dc2 = DrawControl(polygon={'shapeOptions': {'color': '#0000FF'}}, polyline={},
circle={'shapeOptions': {'color': '#0000FF'}})
m2.add_control(dc2)
"""
Explanation: Note that the style is preserved! If you wanted to change the style, you could edit the properties.style dictionary of the GeoJSON data. Or, you could even style the original path in the DrawControl by setting the polygon dictionary of that object. See the code for details.
Now let's add a DrawControl to this second map. For fun we will disable lines and enable circles as well and change the style a bit.
End of explanation
"""
|
450586509/DLNLP
|
src/notebooks/CNN/CNN_random_word.ipynb
|
apache-2.0
|
import keras
from os.path import join
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout,Activation, Lambda,Input
from keras.layers import Embedding
from keras.layers import Convolution1D
from keras.datasets import imdb
from keras import backend as K
from keras.layers import Convolution1D, GlobalMaxPooling1D,Convolution2D,merge
from keras.utils import np_utils
from keras.models import Model
from keras.regularizers import l2
"""
Explanation: 模型介绍
基于卷积神经网络的情感分析
输入为随机初始化词向量
CNN的结构:filter_length = 3,4,5的卷积核各100 个。
SST-5数据集
End of explanation
"""
file_names = ['stsa.fine.test','stsa.fine.train','stsa.fine.dev']
file_path = '/home/bruce/data/sentiment/'
def read_file(fname=''):
with open(join(file_path,fname)) as fr:
lines = fr.readlines()
lines = [line.strip().lower() for line in lines]
lables = [int(line[0:1]) for line in lines]
words = [line[2:].split() for line in lines]
return words,lables
train_X,train_y = read_file(fname='stsa.fine.train')
test_X,test_y = read_file(fname='stsa.fine.test')
dev_X,dev_y = read_file(fname='stsa.fine.dev')
print(len(train_X))
print(len(test_X))
print(len(dev_X))
print(train_X[0:2])
print(train_y[0:2])
"""
Explanation: Data preprocessing
End of explanation
"""
def statics_list2(arrays=[]):
lengths = [len(i) for i in arrays]
lengths = sorted(lengths)
length = len(lengths)
print('length = ',len(lengths))
print('max = ',lengths[-1])
print('min =',lengths[0])
print('average = ',sum(lengths)/length)
print('top 50% = ',lengths[int(0.5*length)])
print('top 80% = ',lengths[int(0.8*length)])
print('top 90% = ',lengths[int(0.9*length)])
print('top 95% = ',lengths[int(0.95*length)])
statics_list2(arrays=train_X)
def token_to_index(datas=[]):
word_index={}
count=1
for data in datas:
for list_ in data:
for w in list_:
if w not in word_index:
word_index[w] = count
count = count + 1
print('leng of word_index =',len(word_index))
for i in range(len(datas)):
datas[i] = [[ word_index[w] for w in line ] for line in datas[i]]
return datas,word_index
X,word_index = token_to_index(datas=[train_X,dev_X])
train_X,dev_X = X
print(len(word_index))
"""
Explanation: Sentence length statistics
End of explanation
"""
max_len = 52
batch_size=32
max_features = 17612
embedding_dim = 100
nb_filter = 100
dense1_hindden = 300
nb_classes = 5
print('Build model...')
input_random = Input(shape=(max_len,), dtype='int32', name='main_input1')
embedding = Embedding(output_dim=embedding_dim, input_dim=max_features)(input_random)
# 卷积层
conv1 = Convolution1D(nb_filter = nb_filter,
filter_length = 2,
border_mode = 'valid',
activation='relu'
)(embedding)
conv2 = Convolution1D(nb_filter = nb_filter,
filter_length = 3,
border_mode = 'valid',
activation='relu'
)(embedding)
conv3 = Convolution1D(nb_filter = nb_filter,
filter_length = 4,
border_mode = 'valid',
activation='relu'
)(embedding)
conv1 =GlobalMaxPooling1D()(conv1)
conv2 =GlobalMaxPooling1D()(conv2)
conv3 =GlobalMaxPooling1D()(conv3)
merged_vector = merge([conv1,conv2,conv3], mode='concat')
# 全连接层
#dense_layer = Dense(dense1_hindden)
#dens1 = dense_layer(merged_vector)
print('dense_layer input_shape should == (300,)')
#print(dense_layer.input_shape)
#dens1 = Activation('relu')(dens1)
# softmax层
dens2 = Dense(nb_classes)(merged_vector)
output_random = Activation('softmax')(dens2)
model = Model(input=input_random,output=output_random)
print('finish build model')
model.compile(optimizer='adadelta',
loss='categorical_crossentropy',
metrics=['accuracy'])
"""
Explanation: Build the model
End of explanation
"""
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
"""
Explanation: Model diagram
End of explanation
"""
print(type(train_y[0]))
train_y_model = np_utils.to_categorical(train_y, nb_classes)
dev_y_model = np_utils.to_categorical(dev_y, nb_classes)
train_X_model = sequence.pad_sequences(train_X, maxlen=max_len)
dev_X_model = sequence.pad_sequences(dev_X, maxlen=max_len)
#test 数据
test_index_X= [[word_index[w] if w in word_index else 0 for w in line] for line in test_X]
test_X_model = sequence.pad_sequences(test_index_X,maxlen=max_len)
test_y_model = np_utils.to_categorical(test_y,nb_classes)
print(test_y_model[0:10])
def my_generator(X=None,y=None):
i = 0
max_i = int(len(X)/batch_size)
while True:
i = i % max_i
x_batch = X[i*batch_size:(i+1)*batch_size]
y_batch = y[i*batch_size:(i+1)*batch_size]
yield (x_batch,y_batch)
i = i + 1
model.fit_generator(my_generator(train_X_model,train_y_model),samples_per_epoch = 32*100,nb_epoch=100,verbose=1,validation_data=(test_X_model,test_y_model))
"""
Explanation: Model inputs
End of explanation
"""
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
nb_classes = 10
# expected input data shape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # returns a sequence of vectors of dimension 32
model.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
model.add(LSTM(32)) # return a single vector of dimension 32
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
"""
Explanation: Experiment log
12 November 2016, 10:16, best score 0.4015
Parameters:
max_len = 36 batch_size=32
max_features = 14714 embedding_dims = 100
nb_filter = 150 filter_length = 2 dense1_hindden = 100 nb_classes = 5
12 November 2016, 10:22, best score 0.4069
Parameters:
max_len = 36 batch_size=32
max_features = 14714 embedding_dims = 50
nb_filter = 150 filter_length = 2 dense1_hindden = 100 nb_classes = 5
12 November 2016, 10:22, best score 0.4151
Parameters:
max_len = 36 batch_size=32
max_features = 14714 embedding_dims = 150
nb_filter = 150 filter_length = 2 dense1_hindden = 100 nb_classes = 5
12 November 2016, 10:22, best score 0.4242 [0.4214, 0.4033, 0.4024, 0.4151, 0.4242]
Parameters:
max_len = 36 batch_size=32
max_features = 14714 embedding_dims = 200
nb_filter = 150 filter_length = 2 dense1_hindden = 100 nb_classes = 5
End of explanation
"""
|
sandeep-n/incubator-systemml
|
projects/breast_cancer/Preprocessing.ipynb
|
apache-2.0
|
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
from breastcancer.preprocessing import preprocess, save, train_val_split
# Ship a fresh copy of the `breastcancer` package to the Spark workers.
# Note: The zip must include the `breastcancer` directory itself,
# as well as all files within it for `addPyFile` to work correctly.
# This is equivalent to `zip -r breastcancer.zip breastcancer`.
dirname = "breastcancer"
zipname = dirname + ".zip"
shutil.make_archive(dirname, 'zip', dirname + "/..", dirname)
spark.sparkContext.addPyFile(zipname)
plt.rcParams['figure.figsize'] = (10, 6)
"""
Explanation: Predicting Breast Cancer Proliferation Scores with Apache Spark and Apache SystemML
Preprocessing
Setup
End of explanation
"""
# TODO: Filtering tiles and then cutting into samples could result
# in samples with less tissue than desired, despite that being the
# procedure of the paper. Look into simply selecting tiles of the
# desired size to begin with.
# Get list of image numbers, minus the broken ones.
broken = {2, 45, 91, 112, 242, 256, 280, 313, 329, 467}
slide_nums = sorted(set(range(1,501)) - broken)
# Settings
training = True
tile_size = 256
sample_size = 256
grayscale = False
num_partitions = 20000
add_row_indices = True
train_frac = 0.8
split_seed = 24
folder = "/home/MDM/breast_cancer/data"
save_folder = "data" # Hadoop-supported directory in which to save DataFrames
df_path = os.path.join(save_folder, "samples_{}_{}{}.parquet".format(
"labels" if training else "testing", sample_size, "_grayscale" if grayscale else ""))
train_df_path = os.path.join(save_folder, "train_{}{}.parquet".format(sample_size,
"_grayscale" if grayscale else ""))
val_df_path = os.path.join(save_folder, "val_{}{}.parquet".format(sample_size,
"_grayscale" if grayscale else ""))
# Process all slides.
df = preprocess(spark, slide_nums, tile_size=tile_size, sample_size=sample_size,
grayscale=grayscale, training=training, num_partitions=num_partitions,
folder=folder)
# Save DataFrame of samples.
save(df, df_path, sample_size, grayscale)
# Load full DataFrame from disk.
df = spark.read.load(df_path)
# Split into train and validation DataFrames based on slide number
train, val = train_val_split(spark, df, slide_nums, folder, train_frac, add_row_indices,
seed=split_seed)
# Save train and validation DataFrames.
save(train, train_df_path, sample_size, grayscale)
save(val, val_df_path, sample_size, grayscale)
"""
Explanation: Execute Preprocessing & Save
End of explanation
"""
# Load train and validation DataFrames from disk.
train = spark.read.load(train_df_path)
val = spark.read.load(val_df_path)
# Take a stratified sample.
p=0.01
train_sample = train.drop("__INDEX").sampleBy("tumor_score", fractions={1: p, 2: p, 3: p}, seed=42)
val_sample = val.drop("__INDEX").sampleBy("tumor_score", fractions={1: p, 2: p, 3: p}, seed=42)
train_sample, val_sample
# Reassign row indices.
# TODO: Wrap this in a function with appropriate default arguments.
train_sample = (
train_sample.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0]))
.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))
train_sample = train_sample.select(train_sample["__INDEX"].astype("int"),
train_sample.slide_num.astype("int"),
train_sample.tumor_score.astype("int"),
train_sample.molecular_score,
train_sample["sample"])
val_sample = (
val_sample.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0]))
.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))
val_sample = val_sample.select(val_sample["__INDEX"].astype("int"),
val_sample.slide_num.astype("int"),
val_sample.tumor_score.astype("int"),
val_sample.molecular_score,
val_sample["sample"])
train_sample, val_sample
# Save train and validation DataFrames.
tr_sample_filename = "train_{}_sample_{}{}.parquet".format(p, sample_size, "_grayscale" if grayscale else "")
val_sample_filename = "val_{}_sample_{}{}.parquet".format(p, sample_size, "_grayscale" if grayscale else "")
train_sample_path = os.path.join(save_folder, tr_sample_filename)
val_sample_path = os.path.join(save_folder, val_sample_filename)
save(train_sample, train_sample_path, sample_size, grayscale)
save(val_sample, val_sample_path, sample_size, grayscale)
"""
Explanation: Sample Data
TODO: Wrap this in a function with appropriate default arguments
End of explanation
"""
|
mdiaz236/DeepLearningFoundations
|
tensorboard/.ipynb_checkpoints/Anna KaRNNa-checkpoint.ipynb
|
mit
|
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
"""
Explanation: First we'll load the text file and convert it into integers for our network to use.
End of explanation
"""
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Size of examples in each of batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
# Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
"""
Explanation: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the split_frac keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
End of explanation
"""
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Build the RNN layers
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
# Run the data through the RNN layers
rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]
outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)
final_state = tf.identity(state, name='final_state')
# Reshape output so it's a bunch of rows, one row for each cell output
seq_output = tf.concat(outputs, axis=1,name='seq_output')
output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
# Now connect the RNN putputs to a softmax layer and calculate the cost
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
name='softmax_w')
softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
logits = tf.matmul(output, softmax_w) + softmax_b
preds = tf.nn.softmax(logits, name='predictions')
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
cost = tf.reduce_mean(loss, name='cost')
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
"""
Explanation: I'll write another function to grab batches out of the arrays made by split_data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window to the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
End of explanation
"""
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
"""
Explanation: Hyperparameters
Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are lstm_size and num_layers. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
End of explanation
"""
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('./logs/1', sess.graph)
"""
Explanation: Write out the graph for TensorBoard
End of explanation
"""
!mkdir -p checkpoints/anna
epochs = 1
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
"""
Explanation: Training
Time for training, which is pretty straightforward. Here I pass in some data and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I calculate the validation loss and save a checkpoint.
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
prime = "Far"
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one, and we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
|
synthicity/activitysim
|
activitysim/examples/example_estimation/notebooks/03_work_location.ipynb
|
agpl-3.0
|
import larch # !conda install larch #for estimation
import pandas as pd
import numpy as np
import yaml
import larch.util.excel
import os
"""
Explanation: Estimating Workplace Location Choice
This notebook illustrates how to re-estimate a single model component for ActivitySim. This process
includes running ActivitySim in estimation mode to read household travel survey files and write out
the estimation data bundles used in this notebook. To review how to do so, please visit the other
notebooks in this directory.
Load libraries
End of explanation
"""
os.chdir('test')
"""
Explanation: We'll work in our test directory, where ActivitySim has saved the estimation data bundles.
End of explanation
"""
modelname="workplace_location"
from activitysim.estimation.larch import component_model
model, data = component_model(modelname, return_data=True)
"""
Explanation: Load data and prep model for estimation
End of explanation
"""
data.coefficients
"""
Explanation: Review data loaded from EDB
Next we can review what was read the EDB, including the coefficients, model settings, utilities specification, and chooser and alternative data.
coefficients
End of explanation
"""
data.alt_values
"""
Explanation: alt_values
End of explanation
"""
data.chooser_data
"""
Explanation: chooser_data
End of explanation
"""
data.landuse
"""
Explanation: landuse
End of explanation
"""
data.spec
"""
Explanation: spec
End of explanation
"""
data.size_spec
"""
Explanation: size_spec
End of explanation
"""
model.estimate(method='BHHH', options={'maxiter':1000})
"""
Explanation: Estimate
With the model setup for estimation, the next step is to estimate the model coefficients. Make sure to use a sufficiently large household sample and set of zones to avoid an over-specified model, which does not have a numerically stable likelihood-maximizing solution. Larch has built-in estimation methods including BHHH, and also offers access to more advanced general-purpose non-linear optimizers in the scipy package, including SLSQP, which allows for bounds and constraints on parameters. BHHH is the default and typically runs faster, but does not follow constraints on parameters.
End of explanation
"""
model.parameter_summary()
"""
Explanation: Estimated coefficients
End of explanation
"""
from activitysim.estimation.larch import update_coefficients, update_size_spec
result_dir = data.edb_directory/"estimated"
"""
Explanation: Output Estimation Results
End of explanation
"""
update_coefficients(
model, data, result_dir,
output_file=f"{modelname}_coefficients_revised.csv",
);
"""
Explanation: Write updated utility coefficients
End of explanation
"""
update_size_spec(
model, data, result_dir,
output_file=f"{modelname}_size_terms.csv",
)
"""
Explanation: Write updated size coefficients
End of explanation
"""
model.to_xlsx(
result_dir/f"{modelname}_model_estimation.xlsx",
data_statistics=False,
);
"""
Explanation: Write the model estimation report, including coefficient t-statistic and log likelihood
End of explanation
"""
pd.read_csv(result_dir/f"{modelname}_coefficients_revised.csv")
pd.read_csv(result_dir/f"{modelname}_size_terms.csv")
"""
Explanation: Next Steps
The final step is to either manually or automatically copy the *_coefficients_revised.csv file and *_size_terms.csv file to the configs folder, rename them to *_coefficients.csv and destination_choice_size_terms.csv, and run ActivitySim in simulation mode. Note that all the location
and destination choice models share the same destination_choice_size_terms.csv input file, so if you
are updating all these models, you'll need to ensure that updated sections of this file for each model
are joined together correctly.
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.23/_downloads/6965b7b1a563cc32b2b5388d95203d43/60_cluster_rmANOVA_spatiotemporal.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemannn <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
"""
Explanation: Repeated measures ANOVA on source data with spatio-temporal clustering
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will tests if the differences in evoked responses between
stimulation modality (visual VS auditory) depend on the stimulus
location (left vs right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
"""
Explanation: Set parameters
End of explanation
"""
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
"""
Explanation: Read epochs for all channels, removing a bad one
End of explanation
"""
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep * 1000 # convert to milliseconds
"""
Explanation: Transform to source space
End of explanation
"""
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
"""
Explanation: Transform to common cortical space
Normally you would read in estimates across several subjects and morph them
to the same cortical space (e.g. fsaverage). For example purposes, we will
simulate this by just having each "subject" have the same response (just
noisy in source space) here.
We'll only consider the left hemisphere in this tutorial.
End of explanation
"""
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
"""
Explanation: It's a good idea to spatially smooth the data, and for visualization
purposes, let's morph these to fsaverage, which is a grade 5 ICO source space
with vertices 0:10242 for each hemisphere. Usually you'd have to morph
each subject's data separately, but here since all estimates are on
'sample' we can use one morph matrix for all the heavy lifting.
End of explanation
"""
X = np.transpose(X, [2, 1, 0, 3])  # now ordered as subjects x time x space x conditions
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
"""
Explanation: Now we need to prepare the group matrix for the ANOVA statistic. To make the
clustering function work correctly with the ANOVA function X needs to be a
list of multi-dimensional arrays (one per condition) of shape: samples
(subjects) x time x space.
First we permute dimensions, then split the array into a list of conditions
and discard the empty dimension resulting from the split using numpy squeeze.
End of explanation
"""
factor_levels = [2, 2]
"""
Explanation: Prepare function for arbitrary contrast
As our ANOVA function is a multi-purpose tool we need to apply a few
modifications to integrate it with the clustering function. This
includes reshaping data, setting default arguments and processing
the return values. For this reason we'll write a tiny dummy function.
We will tell the ANOVA how to interpret the data matrix in terms of
factors. This is done via the factor levels argument which is a list
of the number factor levels for each factor.
End of explanation
"""
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
"""
Explanation: Finally we will pick the interaction effect by passing 'A:B'.
(this notation is borrowed from the R formula language).
As an aside, note that in this particular example, we cannot use the A*B
notation which return both the main and the interaction effect. The reason
is that the clustering function expects stat_fun to return a 1-D array.
To get clusters for both, you must create a loop.
End of explanation
"""
def stat_fun(*args):
# get f-values only.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
"""
Explanation: A stat_fun must deal with a variable number of input arguments.
Inside the clustering function each condition will be passed as flattened
array, necessitated by the clustering procedure. The ANOVA however expects an
input array of dimensions: subjects X conditions X observations (optional).
The following function catches the list input and swaps the first and the
second dimension, and finally calls ANOVA.
<div class="alert alert-info"><h4>Note</h4><p>For further details on this ANOVA function consider the
corresponding
`time-frequency tutorial <tut-timefreq-twoway-anova>`.</p></div>
End of explanation
"""
# as we only have one hemisphere we only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
"""
Explanation: Compute clustering statistic
To use an algorithm optimized for spatio-temporal clustering, we
just pass the spatial adjacency matrix (instead of spatio-temporal).
End of explanation
"""
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='temporal extent (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
"""
Explanation: Visualize the clusters
End of explanation
"""
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
"""
Explanation: Finally, let's investigate the interaction effect by reconstructing the time
courses:
End of explanation
"""
|
fmfn/BayesianOptimization
|
examples/domain_reduction.ipynb
|
mit
|
import numpy as np
from bayes_opt import BayesianOptimization
from bayes_opt import SequentialDomainReductionTransformer
import matplotlib.pyplot as plt
"""
Explanation: Sequential Domain Reduction
Background
Sequential domain reduction is a process where the bounds of the optimization problem are mutated (typically contracted) to reduce the time required to converge to an optimal value. The advantage of this method is typically seen when a cost function is particularly expensive to calculate, or if the optimization routine oscillates heavily.
Basics
The basic steps are a pan and a zoom. These two steps are applied at the same time, updating the problem search space every iteration.
Pan: recentering the region of interest around the most optimal point found.
Zoom: contract the region of interest.
Parameters
There are three parameters for the built-in SequentialDomainReductionTransformer object:
$\gamma_{osc}:$ shrinkage parameter for oscillation. Typically [0.5-0.7]. Default = 0.7
$\gamma_{pan}:$ panning parameter. Typically 1.0. Default = 1.0
$\eta:$ zoom parameter. Default = 0.9
More information can be found in this reference document:
Title: "On the robustness of a simple domain reduction scheme for simulation‐based optimization"
Date: 2002
Author: Stander, N. and Craig, K.
---
Let's start by importing the packages we'll be needing
End of explanation
"""
def ackley(**kwargs):
x = np.fromiter(kwargs.values(), dtype=float)
arg1 = -0.2 * np.sqrt(0.5 * (x[0] ** 2 + x[1] ** 2))
arg2 = 0.5 * (np.cos(2. * np.pi * x[0]) + np.cos(2. * np.pi * x[1]))
return -1.0 * (-20. * np.exp(arg1) - np.exp(arg2) + 20. + np.e)
"""
Explanation: Now let's create an example cost function. This is the Ackley function, which is quite non-linear.
End of explanation
"""
pbounds = {'x': (-5, 5), 'y': (-5, 5)}
"""
Explanation: We will use the standard bounds for this problem.
End of explanation
"""
bounds_transformer = SequentialDomainReductionTransformer()
"""
Explanation: This is where we define our bound_transformer, the Sequential Domain Reduction Transformer
End of explanation
"""
mutating_optimizer = BayesianOptimization(
f=ackley,
pbounds=pbounds,
verbose=0,
random_state=1,
bounds_transformer=bounds_transformer
)
mutating_optimizer.maximize(
init_points=2,
n_iter=50,
)
standard_optimizer = BayesianOptimization(
f=ackley,
pbounds=pbounds,
verbose=0,
random_state=1,
)
standard_optimizer.maximize(
init_points=2,
n_iter=50,
)
"""
Explanation: Now we can set up two identical optimization problems, except one has the bound_transformer variable set.
End of explanation
"""
plt.plot(mutating_optimizer.space.target, label='Mutated Optimizer')
plt.plot(standard_optimizer.space.target, label='Standard Optimizer')
plt.legend()
"""
Explanation: After both have completed, we can plot to see how the objectives performed. It's clear that the Sequential Domain Reduction technique contracted onto the optimal point relatively quickly.
End of explanation
"""
# example x-bound shrinking
x_min_bound = [b[0][0] for b in bounds_transformer.bounds]
x_max_bound = [b[0][1] for b in bounds_transformer.bounds]
x = [x[0] for x in mutating_optimizer.space.params]
plt.plot(x_min_bound[1:], label='x lower bound')
plt.plot(x_max_bound[1:], label='x upper bound')
plt.plot(x[1:], label='x')
plt.legend()
"""
Explanation: Now let's plot the actual contraction of one of the variables (x)
End of explanation
"""
|
jamesfolberth/NGC_STEM_camp_AWS
|
notebooks/machineLearning_notebooks/Intro Regression.ipynb
|
bsd-3-clause
|
import csv
import numpy as np
import scipy as sp
import pandas as pd
import sklearn as sk
import matplotlib.pyplot as plt
from IPython.display import Image
print('csv: {}'.format(csv.__version__))
print('numpy: {}'.format(np.__version__))
print('scipy: {}'.format(sp.__version__))
print('pandas: {}'.format(pd.__version__))
print('sklearn: {}'.format(sk.__version__))
"""
Explanation: Simple Linear Regression
In this module we will learn how to use data to learn a trend and use this trend to predict new observations. First we load the base libraries.
End of explanation
"""
Image(url='http://www.radford.edu/~rsheehy/Gen_flash/Tutorials/Linear_Regression/reg-tut_files/linreg3.gif')
"""
Explanation: The easiest way to learn how regression works is by thinking about an example. Consider an imaginary dataset of buildings built in Denver containing three pieces of information for each building: the year it was built, the number of stories, and the building's total height in feet.
It might seem obvious that the more stories a building has, the taller it is in feet, and vice versa. Linear regression exploits this idea. Let's say I'm a professor researching buildings and stories, and I want to use the # of stories in a building to estimate its height in feet. I can easily stand outside a building and see how many stories it has, but my tape measurer won't reach many of the roofs in Denver. I do know that the two-story building I live in is right around 20 feet high. My idea is to take the number of stories, and multiply by 10.something, but I'm not sure this will work for other buildings (commercial and industrial buildings for example).
I lament to my friends, and by a stroke of incredible luck one of my pals happens to have an old dataset lying around that contains the information I need! His parchment has records of 60 random buildings in Denver built from 1907 to 1992. Inspecting the first few entries of the parchment:
(O) ------------)
....| 770 : 54 |
....| 677 : 47 |
....| 428 : 28 |
(O) ------------)
It seems I may need to multiply by more than 10. Taking the first observations and dividing the height by the number of stories for the first three entries gives about 14.3, 14.4, and 15.3 feet per story, respectively. How can I combine all 60 observations to get a good answer? One could naively just take the average of all of these numbers, but in higher dimensions this doesn't work. To help, we have a statistical technique called linear regression. I can use regression to find a good number to multiply the number of stories by (call it $\beta$), and I hope this will help me get an accurate prediction for the height. I know this height will not be exactly right, so there is some error in each prediction. If I write this all out, we have
$$ \operatorname{(height)} = \operatorname{(# of stories)} \cdot \beta + \epsilon$$
$$ y = X \beta + \epsilon $$
From algebra, we know this is a linear equation, where $\beta$ is the slope of the line. Linear regression actually seeks to minimize the errors $\epsilon$ (the mean squared error). The plot in the link shows the linear regression line, the data it was estimated from, and the errors or deviations $\epsilon$ for each data point.
End of explanation
"""
filename = '/Users/jessicagronski/Downloads/bldgstories1.csv'
raw_data = open(filename, 'rt')
reader = csv.reader(raw_data, delimiter=',', quoting=csv.QUOTE_NONE)
x = list(reader)
data = np.array(x).astype('float')
# Load CSV with numpy
import numpy
raw_data = open(filename, 'rb')
data = numpy.loadtxt(raw_data, delimiter=",")
# Load CSV using Pandas
import pandas
colnames = ['year', 'height', 'stories']
data = pandas.read_csv(filename, names=colnames)
data = pandas.DataFrame(data, columns=colnames)
"""
Explanation: But we can learn about the math later. Let's think about other interesting questions. Which direction of prediction would be better: would the # of stories predict height in feet better than height would predict the # of stories?
Say we decide to predict height using the # of stories. Since we are using one piece of information to predict another, this is called simple linear regression.
Would incorporating the year the building was built help me make a better prediction? This would be an example of multiple regression, since we would use two (or more) pieces of information to predict.
Okay, now it's time to go back to Python. We will import the data file, get an initial look at the data using pandas functions, and then fit some linear regression models using scikit-learn.
The dataset is in a .csv file, which we need to import. You may have already seen this, but we can use the python standard library function csv.reader, numpy.loadtxt, or pandas.read_csv to import the data. We show all three just as a reminder, but we keep the data as a pandas DataFrame object.
End of explanation
"""
print('Dimensions:')
print(data.shape)
print('First six observations:')
print(data.head(6))
print('Correlation matrix:')
correlations = data.corr(method='pearson')
print(correlations)
"""
Explanation: Know Your Data
Now we inspect the DataFrame using some pandas DataFrame methods you have already learned such as shape, head, dtypes, corr, and skew. Find more methods associated with DataFrame objects!
End of explanation
"""
pandas.set_option('precision', 3)
description = data.describe()
print(description)
"""
Explanation: Remember we can acces the five number summary + some using the describe function.
End of explanation
"""
from sklearn import linear_model
obj = linear_model.LinearRegression()
obj.fit(np.array(data.height.values.reshape(-1,1)), data.stories )#need this values.reshape(-1,1) to avoid deprecation warnings
print( obj.coef_, obj.intercept_ )
"""
Explanation: Regression Model
We fit a linear regression model below. We try to use height to predict the number of stories in a building.
End of explanation
"""
x_min, x_max = data.height.values.min() - .5, data.height.values.max() + .5 # for plotting
x_rng = np.linspace(x_min,x_max,200)
plt.plot(x_rng, x_rng * obj.coef_ + obj.intercept_, 'k')
plt.plot(data.height.values, data.stories.values,'ro', alpha = 0.5)
plt.show()
"""
Explanation: We show the data and the regression lines.
End of explanation
"""
obj2 = linear_model.LinearRegression()
X = np.array( (data.height.values, data.year.values))
obj2.fit(X.transpose() , data.stories)
print(obj2.coef_, obj2.intercept_)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection = '3d')
#ax.plot(data.height.values, data.year.values , data.stories.values, 'bo')
ax.plot_surface(data.height.values, data.year.values, (np.dot(X.transpose(),obj2.coef_) \
+ obj2.intercept_), color='b')
ax.show()
#plt.close()
##### doesn't work - have the students try to solve it.
print(np.dot(X.transpose(),obj2.coef_).shape)
data.height.values.shape
"""
Explanation: Check residuals for normality.
Now we will do multiple linear regression. This means we will use more than one predictor when we fit a model and predict our response variable # of stories. We will use both height and the year it was built. We can look at the mean squared error for both models and see which one predicts better.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/awi/cmip6/models/sandbox-2/aerosol.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'sandbox-2', 'aerosol')
"""
Explanation: ES-DOC CMIP6 Model Properties - Aerosol
MIP Era: CMIP6
Institute: AWI
Source ID: SANDBOX-2
Topic: Aerosol
Sub-Topics: Transport, Emissions, Concentrations, Optical Radiative Properties, Model.
Properties: 69 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:37
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Meteorological Forcings
5. Key Properties --> Resolution
6. Key Properties --> Tuning Applied
7. Transport
8. Emissions
9. Concentrations
10. Optical Radiative Properties
11. Optical Radiative Properties --> Absorption
12. Optical Radiative Properties --> Mixtures
13. Optical Radiative Properties --> Impact Of H2o
14. Optical Radiative Properties --> Radiative Scheme
15. Optical Radiative Properties --> Cloud Interactions
16. Model
1. Key Properties
Key properties of the aerosol model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of aerosol model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Prognostic variables in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of tracers in the aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are aerosol calculations generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestep framework of the aerosol model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the time evolution of the prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for aerosol physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the aerosol model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.5. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Meteorological Forcings
**
4.1. Variables 3D
Is Required: FALSE Type: STRING Cardinality: 0.1
Three dimensionsal forcing variables, e.g. U, V, W, T, Q, P, conventive mass flux
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Variables 2D
Is Required: FALSE Type: STRING Cardinality: 0.1
Two dimensionsal forcing variables, e.g. land-sea mask definition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Frequency
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Frequency with which meteological forcings are applied (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Resolution
Resolution in the aersosol model grid
5.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 5.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for aerosol model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Transport
Aerosol transport
7.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of transport in atmosperic aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
"""
Explanation: 7.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for aerosol transport modeling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.3. Mass Conservation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to ensure mass conservation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.4. Convention
Is Required: TRUE Type: ENUM Cardinality: 1.N
Transport by convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Emissions
Atmospheric aerosol emissions
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of emissions in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method used to define aerosol species (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the aerosol species are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prescribed Climatology
Is Required: FALSE Type: ENUM Cardinality: 0.1
Specify the climatology type for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed via a climatology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of aerosol species emitted and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Other Method Characteristics
Is Required: FALSE Type: STRING Cardinality: 0.1
Characteristics of the "other method" used for aerosol emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Concentrations
Atmospheric aerosol concentrations
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of concentrations in the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Prescribed Fields Mmr
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as mass mixing ratios.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Prescribed Fields Aod
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed as AOD plus CCNs.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Optical Radiative Properties
Aerosol optical and radiative properties
10.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of optical and radiative properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11. Optical Radiative Properties --> Absorption
Absorption properties in the aerosol scheme
11.1. Black Carbon
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of black carbon at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Dust
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of dust at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.3. Organics
Is Required: FALSE Type: FLOAT Cardinality: 0.1
Absorption mass coefficient of organics at 550nm (if non-absorbing enter 0)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Optical Radiative Properties --> Mixtures
**
12.1. External
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there external mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12.2. Internal
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there internal mixing with respect to chemical composition?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Mixing Rule
Is Required: FALSE Type: STRING Cardinality: 0.1
If there is internal mixing with respect to chemical composition then indicate the mixing rule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13. Optical Radiative Properties --> Impact Of H2o
**
13.1. Size
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact size?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.2. Internal Mixture
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does H2O impact internal mixture?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Optical Radiative Properties --> Radiative Scheme
Radiative scheme for aerosol
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Shortwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of shortwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Optical Radiative Properties --> Cloud Interactions
Aerosol-cloud interactions
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of aerosol-cloud interactions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.2. Twomey
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the Twomey effect included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Twomey Minimum Ccn
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If the Twomey effect is included, then what is the minimum CCN number?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.4. Drizzle
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect drizzle?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Cloud Lifetime
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the scheme affect cloud lifetime?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.6. Longwave Bands
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of longwave bands
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Model
Aerosol model
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the atmospheric aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
"""
Explanation: 16.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the Aerosol model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other model components coupled to the Aerosol model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.4. Gas Phase Precursors
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of gas phase aerosol precursors.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.5. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type(s) of aerosol scheme used by the aerosols model (potentially multiple: some species may be covered by one type of aerosol scheme and other species covered by another type).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.6. Bulk Scheme Species
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of species covered by the bulk scheme.
End of explanation
"""
|
sdpython/ensae_teaching_cs
|
_doc/notebooks/td1a_home/2021_tsp.ipynb
|
mit
|
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
"""
Explanation: Algo - An aside on the travelling salesman problem
The travelling salesman problem (TSP) is the emblematic NP-complete problem: no algorithm is known that finds the optimal solution in polynomial time. The only exact option is to go through every configuration to find the best one. This notebook only scratches the surface of the problem.
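To get a feel for why exhaustive search does not scale, the sketch below (an illustrative addition, not part of the original notebook) counts the candidate tours as a function of the number of points:
```python
# Illustrative sketch: the number of orderings to examine grows as n!
from math import factorial

for n in [5, 10, 15, 20]:
    print(n, "points ->", factorial(n), "possible orderings")
```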
End of explanation
"""
import numpy
points = numpy.random.random((6, 2))
points
"""
Explanation: Draw random points and display them
End of explanation
"""
def distance_chemin(points, chemin):
dist = 0
for i in range(1, len(points)):
dx, dy = points[chemin[i], :] - points[chemin[i-1], :]
dist += (dx ** 2 + dy ** 2) ** 0.5
dx, dy = points[chemin[0], :] - points[chemin[-1], :]
dist += (dx ** 2 + dy ** 2) ** 0.5
return dist
distance_chemin(points, list(range(points.shape[0])))
"""
Explanation: Distance of a path
End of explanation
"""
import matplotlib.pyplot as plt
def plot_points(points, chemin):
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
loop = list(chemin) + [chemin[0]]
p = points[loop]
ax[0].plot(points[:, 0], points[:, 1], 'o')
ax[1].plot(p[:, 0], p[:, 1], 'o-')
ax[1].set_title("dist=%1.2f" % distance_chemin(points, chemin))
return ax
plot_points(points, list(range(points.shape[0])));
"""
Explanation: Visualization
End of explanation
"""
from itertools import permutations
def optimisation(points, chemin):
dist = distance_chemin(points, chemin)
best = chemin
for perm in permutations(chemin):
d = distance_chemin(points, perm)
if d < dist:
dist = d
best = perm
return best
res = optimisation(points, list(range(points.shape[0])))
plot_points(points, res);
"""
Explanation: Going through all the permutations
End of explanation
"""
from tqdm import tqdm
def optimisation(points, chemin):
dist = distance_chemin(points, chemin)
best = chemin
loop = tqdm(permutations(chemin))
for perm in loop:
loop.set_description(str(perm))
d = distance_chemin(points, perm)
if d < dist:
dist = d
best = perm
return best
res = optimisation(points, list(range(points.shape[0])))
plot_points(points, res);
"""
Explanation: The tqdm module
Only useful in a notebook, and very handy for the impatient.
End of explanation
"""
def optimisation_retournement(points, chemin):
dist = distance_chemin(points, chemin)
best = chemin
for i in range(1, len(chemin)):
for j in range(i+1, len(chemin)):
chemin[i: j] = chemin[j-1: i-1: -1]
d = distance_chemin(points, chemin)
if d < dist:
dist = d
else:
chemin[i: j] = chemin[j-1: i-1: -1]
return chemin
res = optimisation_retournement(points, list(range(points.shape[0])))
plot_points(points, res);
"""
Explanation: Segment reversal
Going through all the permutations takes time, even on today's machines. A cheaper heuristic is to repeatedly reverse segments of the path and keep a reversal whenever it shortens the tour.
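As a rough comparison (an illustrative sketch, not from the original notebook), the reversal heuristic only examines on the order of n² segment reversals per pass, versus n! complete permutations for the exhaustive search:
```python
# Illustrative comparison of the number of candidates examined.
from math import factorial

n = 12
# Same (i, j) pairs as the double loop in optimisation_retournement above.
reversals_per_pass = sum(1 for i in range(1, n) for j in range(i + 1, n))
print("segment reversals per pass:", reversals_per_pass)
print("complete permutations:", factorial(n))
```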
End of explanation
"""
|
fluxcapacitor/source.ml
|
jupyterhub.ml/notebooks/train_deploy/zz_under_construction/tensorflow/optimize/06a_Train_Model_XLA_GPU.ipynb
|
apache-2.0
|
import tensorflow as tf
from tensorflow.python.client import timeline
import pylab
import numpy as np
import os
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
tf.logging.set_verbosity(tf.logging.INFO)
"""
Explanation: Train Model with XLA_GPU (and CPU*)
Some operations do not have XLA_GPU equivalents, so we still need to use CPU.
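If you want to check which devices (including any XLA_GPU device) are visible to TensorFlow in your environment, one way to do it — shown here as a hedged sketch for the TensorFlow 1.x APIs used in this notebook — is:
```python
# Sketch: list the devices TensorFlow can see (CPU, GPU, XLA_CPU, XLA_GPU, ...).
from tensorflow.python.client import device_lib

for device in device_lib.list_local_devices():
    print(device.name, device.device_type)
```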
End of explanation
"""
tf.reset_default_graph()
"""
Explanation: Reset TensorFlow Graph
Useful in Jupyter Notebooks
End of explanation
"""
config = tf.ConfigProto(
log_device_placement=True,
)
config.gpu_options.allow_growth=True
config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
print(config)
sess = tf.Session(config=config)
print(sess)
"""
Explanation: Create TensorFlow Session
End of explanation
"""
from datetime import datetime
version = int(datetime.now().strftime("%s"))
"""
Explanation: Generate Model Version (current timestamp)
End of explanation
"""
num_samples = 100000
x_train = np.random.rand(num_samples).astype(np.float32)
print(x_train)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
print(y_train)
pylab.plot(x_train, y_train, '.')
x_test = np.random.rand(len(x_train)).astype(np.float32)
print(x_test)
noise = np.random.normal(scale=.01, size=len(x_train))
y_test = x_test * 0.1 + 0.3 + noise
print(y_test)
pylab.plot(x_test, y_test, '.')
with tf.device("/cpu:0"):
W = tf.get_variable(shape=[], name='weights')
print(W)
b = tf.get_variable(shape=[], name='bias')
print(b)
with tf.device("/device:XLA_GPU:0"):
x_observed = tf.placeholder(shape=[None],
dtype=tf.float32,
name='x_observed')
print(x_observed)
y_pred = W * x_observed + b
print(y_pred)
learning_rate = 0.025
with tf.device("/device:XLA_GPU:0"):
y_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='y_observed')
print(y_observed)
loss_op = tf.reduce_mean(tf.square(y_pred - y_observed))
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer_op.minimize(loss_op)
print("Loss Scalar: ", loss_op)
print("Optimizer Op: ", optimizer_op)
print("Train Op: ", train_op)
"""
Explanation: Load Model Training and Test/Validation Data
End of explanation
"""
with tf.device("/cpu:0"):
init_op = tf.global_variables_initializer()
print(init_op)
sess.run(init_op)
print("Initial random W: %f" % sess.run(W))
print("Initial random b: %f" % sess.run(b))
"""
Explanation: Randomly Initialize Variables (Weights and Bias)
The goal is to learn more accurate Weights and Bias during training.
End of explanation
"""
def test(x, y):
return sess.run(loss_op, feed_dict={x_observed: x, y_observed: y})
test(x_train, y_train)
"""
Explanation: View Accuracy of Pre-Training, Initial Random Variables
We want this to be close to 0, but it's relatively far away. This is why we train!
End of explanation
"""
loss_summary_scalar_op = tf.summary.scalar('loss', loss_op)
loss_summary_merge_all_op = tf.summary.merge_all()
train_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/xla_gpu/%s/train' % version,
graph=tf.get_default_graph())
test_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/xla_gpu/%s/test' % version,
graph=tf.get_default_graph())
"""
Explanation: Setup Loss Summary Operations for Tensorboard
End of explanation
"""
%%time
with tf.device("/device:XLA_GPU:0"):
run_metadata = tf.RunMetadata()
max_steps = 401
for step in range(max_steps):
if (step < max_steps - 1):
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train})
else:
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train},
options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
run_metadata=run_metadata)
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
with open('timeline-xla-gpu.json', 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
if step % 10 == 0:
print(step, sess.run([W, b]))
train_summary_writer.add_summary(train_summary_log, step)
train_summary_writer.flush()
test_summary_writer.add_summary(test_summary_log, step)
test_summary_writer.flush()
pylab.plot(x_train, y_train, '.', label="target")
pylab.plot(x_train, sess.run(y_pred,
feed_dict={x_observed: x_train,
y_observed: y_train}),
".",
label="predicted")
pylab.legend()
pylab.ylim(0, 1.0)
"""
Explanation: Train Model
End of explanation
"""
import os
optimize_me_parent_path = '/root/models/optimize_me/linear/xla_gpu'
saver = tf.train.Saver()
os.system('rm -rf %s' % optimize_me_parent_path)
os.makedirs(optimize_me_parent_path)
unoptimized_model_graph_path = '%s/unoptimized_xla_gpu.pb' % optimize_me_parent_path
tf.train.write_graph(sess.graph_def,
'.',
unoptimized_model_graph_path,
as_text=False)
print(unoptimized_model_graph_path)
model_checkpoint_path = '%s/model.ckpt' % optimize_me_parent_path
saver.save(sess,
save_path=model_checkpoint_path)
print(model_checkpoint_path)
print(optimize_me_parent_path)
os.listdir(optimize_me_parent_path)
sess.close()
"""
Explanation: View Loss Summaries in Tensorboard
Navigate to the Scalars and Graphs tab at this URL:
http://[ip-address]:6006
Save Graph For Optimization
We will use this later.
End of explanation
"""
%%bash
summarize_graph --in_graph=/root/models/optimize_me/linear/xla_gpu/unoptimized_xla_gpu.pb
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
def convert_graph_to_dot(input_graph, output_dot, is_input_graph_binary):
graph = graph_pb2.GraphDef()
with open(input_graph, "rb") as fh:
if is_input_graph_binary:
graph.ParseFromString(fh.read())
else:
text_format.Merge(fh.read(), graph)
with open(output_dot, "wt") as fh:
print("digraph graphname {", file=fh)
for node in graph.node:
output_name = node.name
print(" \"" + output_name + "\" [label=\"" + node.op + "\"];", file=fh)
for input_full_name in node.input:
parts = input_full_name.split(":")
input_name = re.sub(r"^\^", "", parts[0])
print(" \"" + input_name + "\" -> \"" + output_name + "\";", file=fh)
print("}", file=fh)
print("Created dot file '%s' for graph '%s'." % (output_dot, input_graph))
input_graph='/root/models/optimize_me/linear/xla_gpu/unoptimized_xla_gpu.pb'
output_dot='/root/notebooks/unoptimized_xla_gpu.dot'
convert_graph_to_dot(input_graph=input_graph, output_dot=output_dot, is_input_graph_binary=True)
%%bash
dot -T png /root/notebooks/unoptimized_xla_gpu.dot \
-o /root/notebooks/unoptimized_xla_gpu.png > /tmp/a.out
from IPython.display import Image
Image('/root/notebooks/unoptimized_xla_gpu.png', width=1024, height=768)
"""
Explanation: Show Graph
End of explanation
"""
%%bash
dot -T png /tmp/hlo_graph_1.*.dot -o /root/notebooks/hlo_graph_1.png &>/dev/null
dot -T png /tmp/hlo_graph_10.*.dot -o /root/notebooks/hlo_graph_10.png &>/dev/null
dot -T png /tmp/hlo_graph_50.*.dot -o /root/notebooks/hlo_graph_50.png &>/dev/null
dot -T png /tmp/hlo_graph_75.*.dot -o /root/notebooks/hlo_graph_75.png &>/dev/null
"""
Explanation: XLA JIT Visualizations
End of explanation
"""
|
RNAer/Calour
|
doc/source/notebooks/microbiome_manipulation.ipynb
|
bsd-3-clause
|
import calour as ca
ca.set_log_level(11)
%matplotlib notebook
"""
Explanation: Microbiome data manipulation tutorial
This is a jupyter notebook example of how to sort, filter and handle sample metadata
Setup
End of explanation
"""
cfs=ca.read_amplicon('data/chronic-fatigue-syndrome.biom',
'data/chronic-fatigue-syndrome.sample.txt',
normalize=10000,min_reads=1000)
print(cfs)
"""
Explanation: Load the data
we use two datasets:
the Chronic faitigue syndrome data from:
Giloteaux, L., Goodrich, J.K., Walters, W.A., Levine, S.M., Ley, R.E. and Hanson, M.R., 2016.
Reduced diversity and altered composition of the gut microbiome in individuals with myalgic encephalomyelitis/chronic fatigue syndrome.
Microbiome, 4(1), p.30.
End of explanation
"""
movpic=ca.read_amplicon('data/moving_pic.biom',
'data/moving_pic.sample.txt',
normalize=10000,min_reads=1000)
print(movpic)
"""
Explanation: The Moving Pictures dataset, from:
Caporaso, J.G., Lauber, C.L., Costello, E.K., Berg-Lyons, D., Gonzalez, A., Stombaugh, J., Knights, D., Gajer, P., Ravel, J., Fierer, N. and Gordon, J.I., 2011.
Moving pictures of the human microbiome.
Genome biology, 12(5), p.R50.
End of explanation
"""
print(cfs.sample_metadata['Subject'].is_monotonic_increasing)
cfs=cfs.sort_samples('Subject')
"""
Explanation: sorting the samples based on a metadata field (sort_samples)
Sort the samples of the experiment based on the values in the given field.
is the original data sorted by the Subject field?
End of explanation
"""
print(cfs.sample_metadata['Subject'].is_monotonic_increasing)
"""
Explanation: and is the new data sorted?
End of explanation
"""
movpic=movpic.sort_samples('DAYS_SINCE_EXPERIMENT_START')
movpic=movpic.sort_samples('HOST_SUBJECT_ID')
print(movpic.sample_metadata['DAYS_SINCE_EXPERIMENT_START'].is_monotonic_increasing)
print(movpic.sample_metadata['HOST_SUBJECT_ID'].is_monotonic_increasing)
"""
Explanation: consecutive sorting using different fields
Keeps the order of the previous fields if values for the new field are tied.
For the moving pictures dataset, we want the data to be sorted by individual, and within each individual to be sorted by timepoint
End of explanation
"""
tt=movpic.filter_samples('HOST_SUBJECT_ID','F4')
print('* original:\n%s\n\n* filtered:\n%s' % (movpic, tt))
"""
Explanation: filter samples based on metadata field (filter_samples)
Keep only samples matching the values we supply for the selected metadata field.
lets keep only samples from participant F4
End of explanation
"""
print(movpic.sample_metadata['BODY_HABITAT'].unique())
yy=tt.filter_samples('BODY_HABITAT', ['UBERON:skin', 'UBERON:feces'])
print(yy)
"""
Explanation: we can supply a list of values instead of only one value
now lets only keep skin and fecal samples
End of explanation
"""
yy=tt.filter_samples('BODY_HABITAT', ['UBERON:skin', 'UBERON:feces'], negate=True)
print(yy)
"""
Explanation: we can also reverse the filtering (removing samples with the supplied values)
We use the negate=True parameter
let's keep just the non-skin and non-feces samples
End of explanation
"""
tt=cfs.filter_abundance(25)
print('* original:\n%s\n\n* filtered:\n%s' % (cfs, tt))
"""
Explanation: filter low abundance features (filter_abundance)
Remove all features (bacteria) whose total number of reads (summed over all samples, after normalization) is below the given threshold (25 in the code below).
This is useful for getting rid of non-interesting features. Note that unlike filtering based on the fraction of samples in which a feature is present (filter_prevalence), this method (filter_abundance) also keeps features that are present in only a small fraction of the samples but at high frequency.
End of explanation
"""
tt=cfs.filter_abundance(25, negate=True)
print('* original:\n%s\n\n* filtered:\n%s' % (cfs,tt))
"""
Explanation: Keeping the low abundance bacteria instead
By default, the function removes the low abundance feature. This can be reversed (i.e. keep low abundance features) by using the negate=True parameter)
End of explanation
"""
# remove bacteria present in less than half of the samples
tt=cfs.filter_prevalence(0.5)
print('* original:\n%s\n\n* filtered:\n%s' % (cfs, tt))
"""
Explanation: filter non-common bacteria (filter_prevalence)
Remove bacteria based on fraction of the samples where this bacteria is present.
End of explanation
"""
# keep only high frequency bacteria (mean over all samples > 1%)
tt=cfs.filter_mean(0.01)
print('* original:\n%s\n\n* filtered:\n%s' % (cfs, tt))
"""
Explanation: Filter bacteria based on the mean frequency over all samples (filter_mean)
Remove bacteria which have a mean (over all samples) lower than the desired threshold.
End of explanation
"""
|
cornhundred/ipywidgets
|
docs/source/examples/Widget Custom.ipynb
|
bsd-3-clause
|
from __future__ import print_function
"""
Explanation: Index - Back
End of explanation
"""
import ipywidgets as widgets
from traitlets import Unicode, validate
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
"""
Explanation: Building a Custom Widget - Hello World
The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below).
To create a custom widget, you need to define the widget both in the browser and in the python kernel.
Building a Custom Widget
To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets.
Python Kernel
DOMWidget and Widget
To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets.
_view_name
Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget.
Instead, you must tell it yourself by defining specially named trait attributes, _view_name and _view_module (as seen below) and optionally _model_name and _model_module.
End of explanation
"""
%%javascript
define('hello', ["jupyter-js-widgets"], function(widgets) {
});
"""
Explanation: sync=True traitlets
Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the configurable piece of the traitlets machinery. The sync=True keyword argument tells the widget framework to handle synchronizing that value to the browser. Without sync=True, the browser would have no knowledge of _view_name or _view_module.
Other traitlet types
Unicode, used for _view_name, is not the only Traitlet type, there are many more some of which are listed below:
Any
Bool
Bytes
CBool
CBytes
CComplex
CFloat
CInt
CLong
CRegExp
CUnicode
CaselessStrEnum
Complex
Dict
DottedObjectName
Enum
Float
FunctionType
Instance
InstanceType
Int
List
Long
Set
TCPAddress
Tuple
Type
Unicode
Union
Not all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized.
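As a quick illustration (an added sketch, not part of the original tutorial), several of these trait types can be synchronized on the same widget simply by tagging each one with sync=True; the view and module names used here are hypothetical and would still need a matching JavaScript view, as shown below for the hello module:
```python
# Sketch: a widget whose Int, Bool and Unicode traits are all synced to the front end.
# 'CounterView' and 'counter' are hypothetical names, for illustration only.
import ipywidgets as widgets
from traitlets import Bool, Int, Unicode

class CounterWidget(widgets.DOMWidget):
    _view_name = Unicode('CounterView').tag(sync=True)
    _view_module = Unicode('counter').tag(sync=True)
    count = Int(0).tag(sync=True)
    enabled = Bool(True).tag(sync=True)
    label = Unicode('clicks').tag(sync=True)
```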
Front end (JavaScript)
Models and views
The IPython widget framework front end relies heavily on Backbone.js. Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The _view_name trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model.
Import jupyter-js-widgets
You first need to import the jupyter-js-widgets module. To import modules, use the define method of require.js (as seen below).
End of explanation
"""
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
// Define the HelloView
var HelloView = widgets.DOMWidgetView.extend({
});
return {
HelloView: HelloView
}
});
"""
Explanation: Define the view
Next, define your widget view class. Inherit from the DOMWidgetView by using the .extend method.
End of explanation
"""
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
// Render the view.
render: function() {
this.el.textContent = 'Hello World!';
},
});
return {
HelloView: HelloView
};
});
"""
Explanation: Render method
Lastly, override the base render method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via this.el. The el property is the DOM element associated with the view.
End of explanation
"""
HelloWidget()
"""
Explanation: Test
You should be able to display your widget just like any other widget now.
End of explanation
"""
class HelloWidget(widgets.DOMWidget):
_view_name = Unicode('HelloView').tag(sync=True)
_view_module = Unicode('hello').tag(sync=True)
value = Unicode('Hello World!').tag(sync=True)
"""
Explanation: Making the widget stateful
There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back end. First you need to add a traitlet in the back end. Use the name of value to stay consistent with the rest of the widget framework and to allow your widget to be used with interact.
End of explanation
"""
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
"""
Explanation: Accessing the model from the view
To access the model associated with a view instance, use the model property of the view. The get and set methods are used to interact with the Backbone model. get is trivial; however, you have to be careful when using set. After calling set on the model you need to call the view's touch method. This associates the set operation with a particular view so output will be routed to the correct cell. The model also has an on method, which allows you to listen to events triggered by the model (like value changes).
Rendering model contents
By replacing the string literal with a call to model.get, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes.
End of explanation
"""
%%javascript
require.undef('hello');
define('hello', ["jupyter-js-widgets"], function(widgets) {
var HelloView = widgets.DOMWidgetView.extend({
render: function() {
this.value_changed();
this.model.on('change:value', this.value_changed, this);
},
value_changed: function() {
this.el.textContent = this.model.get('value');
},
});
return {
HelloView : HelloView
};
});
"""
Explanation: Dynamic updates
To get the view to update itself dynamically, register a function to update the view's value when the model's value property changes. This can be done using the model.on method. The on method takes three parameters, an event name, callback handle, and callback context. The Backbone event named change will fire whenever the model changes. By appending :value to it, you tell Backbone to only listen to the change event of the value property (as seen below).
End of explanation
"""
w = HelloWidget()
w
w.value = 'test'
"""
Explanation: Test
End of explanation
"""
|
jeffzhengye/pylearn
|
tensorflow_learning/tf2/notebooks/tf_keras_介绍_工程师版.ipynb
|
unlicense
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
"""
Explanation: Introduction to TensorFlow Keras for Engineers
Author: fchollet<br>
Date created: 2020/04/01<br>
Last modified: 2020/04/28<br>
Description: Everything you need to know to use the high-level tf.keras API to build real-world machine learning solutions.<br>
Translation: Zheng Ye
Setup
End of explanation
"""
x = tf.constant([[5, 2], [1, 3]])
print(x)
"""
Explanation: Introduction
Are you an engineer looking for Keras-powered deep learning solutions to ship in real products?
This guide introduces the core tf.keras API concepts and how to use them.
In this guide, you will learn about:
TensorFlow tensors and gradient tapes
Preparing your data before training a model (turning it into NumPy arrays or tf.data.Dataset objects).
Data preprocessing, e.g. feature normalization or vocabulary indexing.
Building a model that turns your data into predictions, using the Keras Functional API.
Training a model with the built-in fit() method, with checkpointing, metrics monitoring, and fault tolerance.
Evaluating the model and running inference on test data.
Customizing what fit() does, for instance to train a GAN.
Speeding up training with multiple GPUs.
Refining the model through hyperparameter tuning.
Deploying machine learning models on mobile and IoT devices.
At the end of this guide, you can continue with the following examples to reinforce these concepts:
Image classification
Text classification
Credit card fraud detection
Tensors
TensorFlow is an infrastructure layer for differentiable programming. At its heart, like NumPy, it is a framework for manipulating N-dimensional arrays (tensors).
However, there are three key differences between NumPy and TensorFlow:
1. TensorFlow can leverage hardware accelerators such as GPUs and TPUs.
2. TensorFlow can automatically compute the gradient of arbitrary differentiable tensor expressions.
3. TensorFlow computation can be distributed across a large number of devices on one machine, or on many machines.
Let's start with the core object of TensorFlow: the Tensor.
Constant tensors
End of explanation
"""
x.numpy()
"""
Explanation: You can get its value as a NumPy array by calling .numpy():
End of explanation
"""
print("dtype:", x.dtype)
print("shape:", x.shape)
"""
Explanation: Much like a NumPy array, a tensor has dtype and shape attributes:
End of explanation
"""
print(tf.ones(shape=(2, 1)))
print(tf.zeros(shape=(2, 1)))
"""
Explanation: Constant tensors are commonly created with tf.ones and tf.zeros (just like np.ones and np.zeros):
End of explanation
"""
x = tf.random.normal(shape=(2, 2), mean=0.0, stddev=1.0)
#x = tf.random.uniform(shape=(2, 2), minval=0, maxval=10, dtype="int32")
print(x)
"""
Explanation: Creating random constant tensors:
End of explanation
"""
initial_value = tf.random.normal(shape=(2, 2))
a = tf.Variable(initial_value)
print(a)
"""
Explanation: Variables
Variables are special tensors used to store mutable state, such as the weights of a model.
You create a Variable with an initial value:
End of explanation
"""
new_value = tf.random.normal(shape=(2, 2))
a.assign(new_value)
print(a)
for i in range(2):
for j in range(2):
assert a[i, j] == new_value[i, j]
added_value = tf.random.normal(shape=(2, 2))
a.assign_add(added_value)
for i in range(2):
for j in range(2):
assert a[i, j] == new_value[i, j] + added_value[i, j]
"""
Explanation: You update the value of a Variable with .assign(value), .assign_add(increment), or .assign_sub(decrement):
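The next cell exercises assign and assign_add; for completeness, here is a small added sketch of assign_sub, which works the same way:
```python
# Sketch: decrement a Variable in place with assign_sub.
import tensorflow as tf

v = tf.Variable(tf.ones(shape=(2, 2)) * 3.0)
v.assign_sub(tf.ones(shape=(2, 2)))
print(v)  # every entry is now 2.0
```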
End of explanation
"""
a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
with tf.GradientTape() as tape:
tape.watch(a) # Start recording the history of operations applied to `a`
c = tf.sqrt(tf.square(a) + tf.square(b)) # Do some math using `a`
# What's the gradient of `c` with respect to `a`?
dc_da = tape.gradient(c, a)
print(dc_da)
"""
Explanation: Gradients
Here's another big difference with NumPy: you can automatically retrieve the gradient of any differentiable expression. Just open a GradientTape, start watching a tensor via tape.watch(), and compose a differentiable expression using this tensor as input:
End of explanation
"""
a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
with tf.GradientTape() as outer_tape:
outer_tape.watch(a)
with tf.GradientTape() as tape:
tape.watch(a)
c = tf.sqrt(tf.square(a) + tf.square(b))
dc_da = tape.gradient(c, a)
print(dc_da, type(dc_da))
d2c_da2 = outer_tape.gradient(dc_da, a)
print(d2c_da2)
"""
Explanation: By nesting tapes, you can compute higher-order derivatives:
End of explanation
"""
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string`.
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable.
vectorizer = TextVectorization(output_mode="int")
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
"""
Explanation: Data loading & preprocessing
Neural networks don't process raw data such as text files, encoded JPEG image files, or CSV files.
They process vectorized and standardized representations.
Text files need to be read into string tensors, then split into words (tokens). Finally, the words need to be indexed and turned into integer tensors.
Images need to be read and decoded into integer tensors, then converted to floating point and normalized to small values (usually between 0 and 1).
CSV data needs to be parsed, with numerical features converted to floating-point tensors and categorical features indexed and converted to integer tensors.
Then each feature typically needs to be normalized to zero mean and unit variance.
Let's get started!
Data loading
tf.keras models accept three types of inputs:
NumPy arrays, just like Scikit-Learn and many other Python libraries. This is a good option if your data fits in memory.
TensorFlow Dataset objects. This is a high-performance option that is more suitable for datasets that do not fit in memory and that are streamed from disk or from a distributed filesystem.
Python generators that yield batches of data (such as custom subclasses of keras.utils.Sequence).
Before you start training a model, you will need to make your data available as one of these formats. If you have a large dataset and you are training on GPU(s), consider using Dataset objects, since they take care of performance-critical details such as:
- Asynchronously preprocessing your data on CPU while your GPU is busy, and buffering it into a queue.
- Prefetching data onto GPU memory so it is immediately available when the GPU has finished processing the previous batch, so you can reach full GPU utilization.
Keras features a range of utilities to turn raw data on disk into a Dataset:
- tf.keras.preprocessing.image_dataset_from_directory turns image files sorted into class-specific folders into a labeled dataset of image tensors.
- tf.keras.preprocessing.text_dataset_from_directory does the same for text files.
In addition, the TensorFlow tf.data module includes other similar utilities, such as tf.data.experimental.make_csv_dataset to load structured data from CSV files.
Example: obtaining a labeled dataset from image files on disk
Suppose the image files are sorted by class into different folders, like this:
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
Then you can do:
```python
# Create a dataset.
dataset = keras.preprocessing.image_dataset_from_directory(
    'path/to/main_directory', batch_size=64, image_size=(200, 200))

# Iterate over the batches yielded by the dataset.
for data, labels in dataset:
    print(data.shape)    # (64, 200, 200, 3)
    print(data.dtype)    # float32
    print(labels.shape)  # (64,)
    print(labels.dtype)  # int32
```
The label of a sample is the rank of its folder in alphanumeric order. Naturally, this can also be configured explicitly by passing, e.g., class_names=['class_a', 'class_b'], in which case label 0 will be assigned to class_a and 1 to class_b.
Example: obtaining a labeled dataset from text files on disk
Likewise for text: if you have .txt documents sorted by class into different folders, you can do:
```python
dataset = keras.preprocessing.text_dataset_from_directory(
    'path/to/main_directory', batch_size=64)

# For example, iterate over the batches yielded by the dataset.
for data, labels in dataset:
    print(data.shape)    # (64,)
    print(data.dtype)    # string
    print(labels.shape)  # (64,)
    print(labels.dtype)  # int32
```
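For structured CSV data, a similar one-liner is available in tf.data; the snippet below is a hedged sketch (the file path and the label column name are placeholders, not files used in this guide):
```python
# Sketch: build a batched dataset of (features, label) pairs from a CSV file.
# 'path/to/data.csv' and 'target' are hypothetical placeholders.
import tensorflow as tf

csv_dataset = tf.data.experimental.make_csv_dataset(
    'path/to/data.csv', batch_size=64, label_name='target')
```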
Data preprocessing with Keras
Once your data is in the form of string/int/float NumPy arrays, or a Dataset object (or a Python generator) that yields batches of string/int/float tensors, it is time to preprocess the data.
This can mean:
- Tokenization of string data, followed by token indexing.
- Feature normalization.
- Rescaling the data to small values (in general, input values to a neural network should be close to zero — typically either data with zero mean and unit variance, or data in the [0, 1] range).
The ideal machine learning model is end-to-end
In general, you should seek to do data preprocessing as part of your model as much as possible, not via an external preprocessing pipeline, because external preprocessing makes your models less portable when it is time to reuse them elsewhere. Consider a model that processes text: it uses a specific tokenization algorithm and a specific vocabulary index. When you want to ship your model to a mobile app or a JavaScript app, you will need to recreate the exact same preprocessing in the target language. This can get very tricky: any small discrepancy between the original pipeline and the one you recreate can completely invalidate your model, or at least severely degrade its performance.
It would be much easier to simply export an end-to-end model that already includes the preprocessing. The ideal model should expect as input something as close as possible to raw data: an image model should expect RGB pixel values in the [0, 255] range, and a text model should accept strings of utf-8 characters. That way, the consumer of the exported model does not have to know about the preprocessing pipeline.
Using Keras preprocessing layers
In Keras, you do in-model data preprocessing via preprocessing layers. This includes:
- Vectorizing raw strings of text via the TextVectorization layer
- Feature normalization via the Normalization layer
- Image rescaling, cropping, or image data augmentation
The key advantage of Keras preprocessing layers is that they can be included directly in your model, either during or after training, which makes your models portable.
Some preprocessing layers have a state:
- TextVectorization holds an index mapping words or tokens to integer indices
- Normalization holds the mean and variance of your features
The state of a preprocessing layer is obtained by calling layer.adapt(data) on a sample of the training data (or on all of it).
Example: turning strings into sequences of integer word indices
End of explanation
"""
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string`.
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable.
vectorizer = TextVectorization(output_mode="binary", ngrams=2)
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
"""
Explanation: Example: turning strings into sequences of one-hot encoded bigrams
End of explanation
"""
from tensorflow.keras.layers.experimental.preprocessing import Normalization
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
normalizer = Normalization(axis=-1)
normalizer.adapt(training_data)
normalized_data = normalizer(training_data)
print("var: %.4f" % np.var(normalized_data))
print("mean: %.4f" % np.mean(normalized_data))
"""
Explanation: Example: normalizing features
End of explanation
"""
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
cropper = CenterCrop(height=150, width=150)
scaler = Rescaling(scale=1.0 / 255)
output_data = scaler(cropper(training_data))
print("shape:", output_data.shape)
print("min:", np.min(output_data))
print("max:", np.max(output_data))
"""
Explanation: Example: rescaling & center-cropping images
Both the Rescaling layer and the CenterCrop layer are stateless, so it is not necessary to call adapt() in this case.
End of explanation
"""
# Let's say we expect our inputs to be RGB images of arbitrary size
inputs = keras.Input(shape=(None, None, 3))
"""
Explanation: Building models with the Keras Functional API
A "layer" is simply an input-output transformation. For instance, a linear projection layer maps its inputs to a 16-dimensional feature space:
```python
dense = keras.layers.Dense(units=16)
```
A "model" is a directed acyclic graph of layers. You can think of a model as a "bigger layer" that encompasses multiple sublayers and that can be trained by exposing it to data.
The most common and most powerful way to build Keras models is the Functional API. To build a model with the Functional API, you start by specifying the shape (and optionally the dtype) of your inputs. If any dimension of your input can vary, you can specify it as None. For instance, an input for 200x200 RGB images would have shape (200, 200, 3), but an input for RGB images of any size would have shape (None, None, 3).
End of explanation
"""
from tensorflow.keras import layers
# Center-crop images to 150x150
x = CenterCrop(height=150, width=150)(inputs)
# Rescale images to [0, 1]
x = Rescaling(scale=1.0 / 255)(x)
# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
# Apply global average pooling to get flat feature vectors
x = layers.GlobalAveragePooling2D()(x)
# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation="softmax")(x)
"""
Explanation: After defining your input(s), you chain layer transformations on top of them, until you reach the final output:
End of explanation
"""
model = keras.Model(inputs=inputs, outputs=outputs)
"""
Explanation: Once you have defined the directed acyclic graph of layers that turns your input(s) into your output(s) — much like assembling building blocks — you instantiate a Model object:
End of explanation
"""
data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
processed_data = model(data)
print(processed_data.shape)
"""
Explanation: This model behaves basically like a bigger layer: you can call it on batches of data, like this:
End of explanation
"""
model.summary()
"""
Explanation: You can print a summary of how your data gets transformed at each stage of the model. This is very useful for debugging.
Note that the output shape displayed for each layer includes the batch size. Here the batch size is None, which indicates that the model can process batches of any size.
End of explanation
"""
# Get the data as Numpy arrays
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Build a simple model
inputs = keras.Input(shape=(28, 28))
x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(inputs)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
outputs = layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.summary()
# Compile the model
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
# Train the model for 1 epoch from Numpy data
batch_size = 64
print("Fit on NumPy data")
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=1)
# Train the model for 1 epoch using a dataset
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
print("Fit on Dataset")
history = model.fit(dataset, epochs=1)
"""
Explanation: The Functional API also makes it easy to build models that have multiple inputs or outputs.
For an in-depth overview of this part, see the guide to the Functional API.
Training models with fit()
At this point, you know:
- How to prepare your data
- How to build a model that processes your data
The next step is to train your model on your data. The Model class features a built-in training loop, the fit() method. It accepts Dataset objects, Python generators that yield batches of data, or NumPy arrays.
Before you can call fit(), you need to specify an optimizer and a loss function. This is the compile() step:
```python
model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
              loss=keras.losses.CategoricalCrossentropy())
```
The loss function and the optimizer can also be specified via their string identifiers:
```python
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
```
Once your model is compiled, you can start feeding it data. Here is what fitting a model looks like with NumPy data:
```python
model.fit(numpy_array_of_samples, numpy_array_of_labels,
          batch_size=32, epochs=10)
```
Besides the data, you have to specify two key parameters: batch_size and the number of epochs (iterations over the data). The example above uses batches of 32 samples and trains for 10 epochs. Here is what fitting looks like with a Dataset:
```python
model.fit(dataset_of_samples_and_labels, epochs=10)
```
Since the data yielded by a dataset is expected to be already batched, you usually don't need to specify the batch size here.
Here is an MNIST digit-classification example:
End of explanation
"""
print(history.history)
"""
Explanation: The call to fit() returns a "history" object that records what happened over the course of training. The history.history dict contains the metric values for each epoch (in this example there is only one metric, the loss).
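As a small added sketch, you could plot the recorded values with matplotlib (here the model was trained for a single epoch, so the curve is just one point; with more epochs you get a proper loss curve):
```python
# Sketch: plot the per-epoch training loss recorded by fit().
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], marker='o')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
```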
End of explanation
"""
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
history = model.fit(dataset, epochs=1)
"""
Explanation: For a detailed overview of fit(), see the
guide to training & evaluation with the built-in Keras methods.
Keeping track of performance metrics
As you train a model, you want to keep track of metrics such as classification accuracy, precision, recall, AUC, etc. Besides, you want to monitor these metrics not only on the training data, but also on a validation set.
Monitoring metrics
You can pass a list of metric objects to compile(), like this:
End of explanation
"""
val_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
history = model.fit(dataset, epochs=1, validation_data=val_dataset)
"""
Explanation: Passing validation data to fit()
You can pass validation data to fit() to monitor your validation loss and validation metrics. Validation metrics get reported at the end of each epoch.
End of explanation
"""
loss, acc = model.evaluate(val_dataset) # returns loss and metrics
print("loss: %.2f" % loss)
print("acc: %.2f" % acc)
"""
Explanation: Using callbacks for checkpointing (and more)
If training goes on for a long time, it is important to save your model at regular intervals. You can then use the saved model to restart training in case the training process crashes.
An important feature of Keras is callbacks, configured in fit(). Callbacks are objects that get called by the model at different points during training, in particular:
- At the beginning and end of each batch
- At the beginning and end of each epoch
Callbacks are a way to make model training fully scriptable.
You can use callbacks to periodically save your model.
For example: use a ModelCheckpoint callback to save the model at the end of every epoch.
```python
callbacks = [
    keras.callbacks.ModelCheckpoint(
        filepath='path/to/my/model_{epoch}',
        save_freq='epoch')
]
model.fit(dataset, epochs=2, callbacks=callbacks)
```
You can also use callbacks to periodically change the learning rate, to send the monitored metrics to a Slack bot, to send yourself an email notification, and so on.
For details, see the callbacks API documentation and the
guide to writing custom callbacks.
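For instance, early stopping and learning-rate reduction are available as built-in callbacks; the snippet below is a sketch of how they might be combined with ModelCheckpoint, reusing the model and datasets from this guide (the monitored metric val_loss assumes that validation data is passed to fit(), as shown earlier):
```python
# Sketch: combining several built-in callbacks in a single fit() call.
callbacks = [
    keras.callbacks.ModelCheckpoint(
        filepath='path/to/my/model_{epoch}',
        save_freq='epoch'),
    keras.callbacks.EarlyStopping(monitor='val_loss', patience=3),
    keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2),
]
model.fit(dataset, epochs=20, validation_data=val_dataset, callbacks=callbacks)
```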
Monitoring training progress with TensorBoard
The Keras progress bar in the command line is not the most ergonomic way to monitor your loss and metrics. A better option is
TensorBoard, a web application that can display your loss, metrics, and more in real time.
To use it, pass a keras.callbacks.TensorBoard callback to fit():
```python
callbacks = [
    keras.callbacks.TensorBoard(log_dir='./logs')
]
model.fit(dataset, epochs=2, callbacks=callbacks)
```
You can then launch TensorBoard with:
tensorboard --logdir=./logs
Here's more information.
After fit(): evaluating test performance and generating predictions on new data
Once you have a trained model, you can evaluate its loss and metrics on new data via evaluate():
End of explanation
"""
predictions = model.predict(val_dataset)
print(predictions.shape)
"""
Explanation: You can also generate predictions with predict(). Since predict() is meant for new data for which you don't have labels, it only returns the predictions and no loss or metric values.
End of explanation
"""
# Example training data, of dtype `string`.
samples = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
labels = [[0], [1]]
# Prepare a TextVectorization layer.
vectorizer = TextVectorization(output_mode="int")
vectorizer.adapt(samples)
# Asynchronous preprocessing: the text vectorization is part of the tf.data pipeline.
# First, create a dataset
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Apply text vectorization to the samples
dataset = dataset.map(lambda x, y: (vectorizer(x), y))
# Prefetch with a buffer size of 2 batches
dataset = dataset.prefetch(2)
# Our model should expect sequences of integers as inputs
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=10, output_dim=32)(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
"""
Explanation: fit()中使用自定义的训练步骤training step
默认设置,fit()是配置为监督学习环境。如果需要不同的训练过程(如对抗网络GAN的训练循环),可以提供自定义的实现Model.train_step(),keras内部fit()方法会重复的调用该方法。
Metrics, callbacks,可以如常工作
下面是重新实现 fit():
``python
class CustomModel(keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass tofit().
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured incompile()`)
loss = self.compiled_loss(y, y_pred,regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
Construct and compile an instance of CustomModel
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer='adam', loss='mse', metrics=[...])
Just use fit as usual
model.fit(dataset, epochs=3, callbacks=...)
```
想更深入请看:
"Customizing what happens in fit()".
Debugging your model with eager execution
If you write custom training steps or custom layers, you will usually need to debug them.
The debugging experience is an integral part of a framework: with Keras, the debugging
workflow is designed with the user in mind.
By default, Keras models are compiled to highly optimized computation graphs that run much faster. That means the Python code you write (e.g. in a custom train_step) is not the code that is actually executed, which puts a layer of indirection between you and the running program and makes debugging harder.
Debugging is best done step by step: you want to sprinkle your code with print statements, and you may also want to use pdb. You can achieve this by running the model eagerly.
Pass run_eagerly=True to compile():
```python
model.compile(optimizer='adam', loss='mse', run_eagerly=True)
```
Of course, the downside is that the model runs significantly slower. Once you are done debugging, switch back to compiled (graph) execution for your real training runs.
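As a small illustrative sketch (the subclass and the printed message are made up; run_eagerly=True is what makes ordinary Python tooling usable inside the training step):
```python
class DebuggableModel(keras.Model):
    def train_step(self, data):
        x, y = data
        print("x shape:", x.shape)           # ordinary print works because the step runs eagerly
        # import pdb; pdb.set_trace()        # ...or drop into the debugger here
        return super().train_step(data)

model = DebuggableModel(inputs, outputs)
model.compile(optimizer='adam', loss='mse', run_eagerly=True)
```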
Speeding up training with multiple GPUs
tf.keras ships with industry-strength support for multi-GPU training and distributed multi-worker training, via the tf.distribute API.
If you have multiple GPUs on your machine, you can train your model on all of them by doing the following:
Create a tf.distribute.MirroredStrategy object
Build and compile your model inside the strategy's scope
Call fit() and evaluate() as usual
```python
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()

# Open a strategy scope.
with strategy.scope():
    # Everything that creates variables should be under the strategy scope.
    # In general this is only model construction & `compile()`.
    model = Model(...)
    model.compile(...)

# Train the model on all available devices.
train_dataset, val_dataset, test_dataset = get_dataset()
model.fit(train_dataset, epochs=2, validation_data=val_dataset)

# Test the model on all available devices.
model.evaluate(test_dataset)
```
For a detailed introduction to multi-GPU & distributed training, see
this guide.
Synchronous preprocessing on the GPU device vs. asynchronous preprocessing on the host CPU
Earlier we covered preprocessing done directly inside the model, using layers such as CenterCrop and Rescaling.
Making preprocessing part of the model is a good option when you want preprocessing to happen on-device, for instance GPU-accelerated feature normalization or image augmentation.
But this kind of preprocessing is not a good fit in every case, in particular text preprocessing with the TextVectorization layer. Because of its sequential nature, and because it can only run on the CPU, asynchronous preprocessing on the CPU is a better idea.
With asynchronous preprocessing, the preprocessing operations run on the CPU, and the preprocessed samples are buffered into a queue while your GPU is busy with the previous batch of data. Right before the GPU becomes available again, the buffered samples are prefetched from the queue into GPU memory. This ensures that preprocessing never blocks the GPU.
To preprocess asynchronously, simply use dataset.map to inject the preprocessing operation into your data pipeline:
End of explanation
"""
# Our dataset will yield samples that are strings
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Our model should expect strings as inputs
inputs = keras.Input(shape=(1,), dtype="string")
x = vectorizer(inputs)
x = layers.Embedding(input_dim=10, output_dim=32)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
"""
Explanation: Compare this to doing the text vectorization as part of the model:
End of explanation
"""
|
rjdkmr/do_x3dna
|
docs/notebooks/calculate_elasticity_tutorial.ipynb
|
gpl-3.0
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import dnaMD
%matplotlib inline
"""
Explanation: Elastic Properties and Deformation Energy
This tutorial discusses the analyses that can be performed using the dnaMD Python module included in the do_x3dna package. The tutorial was prepared using a Jupyter Notebook, and this notebook tutorial file can be downloaded from this link.
Download the input files that are used in the tutorial from this link.
The following two input files are required in this tutorial
tutorial_data/elasticity_DNA/free_dna.h5
tutorial_data/elasticity_DNA/bound_dna.h5
These two files should be present inside tutorial_data/elasticity_DNA of the present working directory.
The above two files can be created by the steps as shown here
Importing Python Modules
numpy: Required for the calculations involving large arrays
matplotlib: Required to plot the results
dnaMD: Python module to analyze DNA/RNA structures from the do_x3dna output files.
End of explanation
"""
eyDNA = dnaMD.dnaEY(27, 'BST', filename='elasticity_DNA/free_dna.h5')
"""
Explanation: Initializing the eyDNA object with the free_dna.h5 file
The eyDNA object is initialized using the total number of base pairs and the HDF5 file.
This class contains all the functions required to calculate the elastic properties and the deformation free energy.
End of explanation
"""
# All frames
avg, mod_matrix = eyDNA.getStretchTwistBendModulus([4,20], paxis='X')
print('Average values for all frames: ', avg)
print('Modulus matrix for all frames: \n', mod_matrix )
print(' ')
# Elastic matrix
avg, mod_matrix = eyDNA.getStretchTwistBendModulus([4,20], paxis='X', matrix=True)
print('Average values for all frames: ', avg)
print('Elastic constant matrix for all frames: \n', mod_matrix )
print(' ')
"""
Explanation: Determining the modulus matrix - bending, stretching and twisting
The modulus matrix for all three major motions (bending, stretching and twisting) can be obtained with the getStretchTwistBendModulus method.
In the following example, the modulus matrix and the elastic constant matrix (matrix=True) are calculated for all frames.
End of explanation
"""
time, modulus = eyDNA.getModulusByTime([4,20], frameGap=500, masked=True)
print('Keys in returned dictionary:\n', '\n'.join(list(modulus.keys())), '\n-----------')
# Stretching modulus
plt.plot(time, modulus['stretch'])
plt.scatter(time, modulus['stretch'])
plt.xlabel('Time (ps)')
plt.ylabel(r'Stretching Modulus (pN)')
plt.show()
# Twist rigidity
plt.plot(time, modulus['twist'])
plt.scatter(time, modulus['twist'])
plt.xlabel('Time (ps)')
plt.ylabel(r'Rigidity (pN nm$^2$)')
plt.show()
# Stretch twist coupling
plt.plot(time, modulus['stretch-twist'])
plt.scatter(time, modulus['stretch-twist'])
plt.xlabel('Time (ps)')
plt.ylabel(r'Stretch-Twist Coupling (pN nm)',)
plt.show()
"""
Explanation: The elastic matrix is in this form:
$$\text{Elastic matrix} = \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{By,T} & K_{S,T} & K_{T}
\end{bmatrix}
$$
Where:
$Bx$ - Bending motion in one plane
$By$ - Bending motion in another orthogonal plane
$S$ - Stretching motion
$T$ - Twisting motion
$$\text{modulus matrix} =
\begin{bmatrix}
M_{Bx} & M_{Bx,By} & M_{Bx,S} & M_{Bx,T} \\
M_{Bx,By} & M_{By} & M_{By,S} & M_{By,T} \\
M_{Bx,S} & M_{By,S} & M_{S} & M_{S,T} \\
M_{Bx,T} & M_{By,T} & M_{S,T} & M_{T}
\end{bmatrix}
$$
$$
= 4.1419464 \times \begin{bmatrix}
K_{Bx} & K_{Bx,By} & K_{Bx,S} & K_{Bx,T} \\
K_{Bx,By} & K_{By} & K_{By,S} & K_{By,T} \\
K_{Bx,S} & K_{By,S} & K_{S} & K_{S,T} \\
K_{Bx,T} & K_{By,T} & K_{S,T} & K_{T}
\end{bmatrix} \times L_0
$$
Where:
$M_{Bx}$ - Bending-1 stiffness in one plane
$M_{By}$ - Bending-2 stiffness in another orthogonal plane
$M_{S}$ - Stretch Modulus
$M_{T}$ - Twist rigidity
$M_{Bx,By}$ - Bending-1 and Bending-2 coupling
$M_{By,S}$ - Bending-2 and stretching coupling
$M_{S,T}$ - Stretching-Twisting coupling
$M_{Bx,S}$ - Bending-1 Stretching coupling
$M_{By,T}$ - Bending-2 Twisting coupling
$M_{Bx,T}$ - Bending-1 Twisting coupling
Convergence in bending, stretching and twisting with their couplings
Elasticities cannot be calculated from an individual snapshot or frame. However, these properties can be calculated as a function of time by considering all the frames up to that time, for example 0-50 ns, 0-100 ns, 0-150 ns, etc. This way we can assess convergence, and we can also estimate the error using the block-averaging method.
Elasticities over time can be calculated with the getModulusByTime method, as used in the code above.
If esType='BST', an ordered dictionary of 1D arrays of shape (nframes) is returned. The keys in the dictionary are the names of the elasticities, in the same order as listed below.
$M_{Bx}$ - bend-1 - Bending-1 stiffness in one plane
$M_{By}$ - bend-2 - Bending-2 stiffness in another orthogonal plane
$M_{S}$ - stretch - Stretch Modulus
$M_{T}$ - twist - Twist rigidity
$M_{Bx,By}$ - bend-1-bend-2 - Bending-1 and Bending-2 coupling
$M_{By,S}$ - bend-2-stretch - Bending-2 and stretching coupling
$M_{S,T}$ - stretch-twist - Stretching-Twisting coupling
$M_{Bx,S}$ - bend-1-stretch - Bending-1 Stretching coupling
$M_{By,T}$ - bend-2-twist - Bending-2 Twisting coupling
$M_{Bx,T}$ - bend-1-twist - Bending-1 Twisting coupling
If esType='ST', a 2D array with three properties of shape (3, nframes) will be returned.
$M_{S}$ - stretch - Stretch Modulus
$M_{T}$ - twist - Twist rigidity
$M_{S,T}$ - stretch-twist - Stretching-Twisting coupling
In the following example, the moduli as a function of time are calculated by adding 500 frames at each step (frameGap=500).
End of explanation
"""
# Load parameters of bound DNA
boundDNA = dnaMD.DNA(27, filename='elasticity_DNA/bound_dna.h5')
"""
Explanation: Deformation free energy of bound DNA
Deformation energy of a probe DNA (bound DNA) can be calculated with reference to the DNA present in the current object.
The deformation free energy is calculated from the elastic matrix as follows:
$$G = \frac{1}{2L_0}\mathbf{xKx^T}$$
$$\mathbf{x} = \begin{bmatrix}
(\theta^{x} - \theta^{x}_0) & (\theta^{y} - \theta^{y}_0) & (L - L_0) & (\phi - \phi_0)
\end{bmatrix}$$
Where $\mathbf{K}$, $\theta^{x}_0$, $\theta^{y}_0$, $L_0$ and $\phi_0$ are calculated from the reference DNA, while $\theta^{x}$, $\theta^{y}$, $L$ and $\phi$ are calculated for the probe DNA in each frame.
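As a quick NumPy illustration of this quadratic form (placeholder values only; this is not part of the dnaMD API):
```python
import numpy as np

K = np.eye(4)                            # elastic constant matrix of the reference DNA (placeholder)
x = np.array([0.10, -0.20, 0.05, 0.30])  # deviations (bend-x, bend-y, length, twist) from the reference averages
L0 = 5.0                                 # average contour length of the reference segment (placeholder)

G = 0.5 / L0 * x @ K @ x                 # deformation free energy for one frame
print(G)
```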
We already loaded the data for reference DNA above. Here, we will load data for probe DNA.
End of explanation
"""
# Deformation free energy of bound DNA and calculate all above listed terms
time, energy = eyDNA.getGlobalDeformationEnergy([4,20], boundDNA, paxis='X', which='all', masked=True)
energyTerms=list(energy.keys())
print('Keys in returned dictionary:\n', '\n'.join(energyTerms), '\n-----------')
# Plot two energy terms
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(hspace=0.3)
ax1 = fig.add_subplot(211)
ax1.set_title('Bound DNA, entire elastic matrix')
ax1.plot(time, energy['full'])
ax1.set_xlabel('Time (ps)')
ax1.set_ylabel(r'Deformation Free Energy (kJ/mol)',)
ax2 = fig.add_subplot(212)
ax2.set_title('Bound DNA, only diagonal of elastic matrix')
ax2.plot(time, energy['diag'])
ax2.set_xlabel('Time (ps)')
ax2.set_ylabel(r'Deformation Free Energy (kJ/mol)',)
plt.show()
# Calculate average and error for each energy terms
error = dnaMD.get_error(time, list(energy.values()), len(energyTerms), err_type='block', tool='gmx analyze')
print("==============================================")
print('{0:<16}{1:>14}{2:>14}'.format('Energy(kJ/mol)', 'Average', 'Error'))
print("----------------------------------------------")
for i in range(len(energyTerms)):
print('{0:<16}{1:>14.3f}{2:>14.3f}'.format(energyTerms[i], np.mean(energy[energyTerms[i]]),error[i]))
print("==============================================\n")
"""
Explanation: The deformation free energy can be calculated for the following motions, selected via the which option.
'full' : Use entire elastic matrix -- all motions with their coupling
'diag' : Use diagonal of elastic matrix -- all motions but no coupling
'b1' : Only bending-1 motion
'b2' : Only bending-2 motion
'stretch' : Only stretching motion
'twist' : Only Twisting motions
'st_coupling' : Only stretch-twist coupling motion
'bs_coupling' : Only Bending and stretching coupling
'bt_coupling' : Only Bending and Twisting coupling
'bb_coupling' : Only bending-1 and bending-2 coupling
'bend' : Both bending motions with their coupling
'st' : Stretching and twisting motions with their coupling
'bs' : Bending (b1, b2) and stretching motions with their coupling
'bt' : Bending (b1, b2) and twisting motions with their coupling
which can be either 'all' or a list of energy terms given above.
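For example, here is a sketch of requesting only a few terms (reusing the call from the code cell above; the particular subset is arbitrary):
```python
time, energy = eyDNA.getGlobalDeformationEnergy([4, 20], boundDNA, paxis='X',
                                                which=['full', 'diag', 'bend'],
                                                masked=True)
```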
End of explanation
"""
# base-step
avg, matrix = eyDNA.calculateLocalElasticity([10,13], helical=False)
# Print matrix in nice format
out = ''
mean_out = ''
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
if j != matrix.shape[0]-1:
out += '{0:>10.5f} '.format(matrix[i][j])
else:
out += '{0:>10.5f}\n'.format(matrix[i][j])
mean_out += '{0:>15.3f} '.format(avg[i])
print('Average values for all frames: ', mean_out)
print('=========== ============== Elastic Matrix =============== ===========\n')
print(out)
print('=========== ====================== ====================== ===========')
# helical base-step
avg, matrix = eyDNA.calculateLocalElasticity([10,13], helical=True)
# Print matrix in nice format
out = ''
mean_out = ''
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
if j != matrix.shape[0]-1:
out += '{0:>10.5f} '.format(matrix[i][j])
else:
out += '{0:>10.5f}\n'.format(matrix[i][j])
mean_out += '{0:>15.3f} '.format(avg[i])
print('\n\nAverage values for all frames: ', mean_out)
print('=========== ============== Elastic Matrix =============== ===========\n')
print(out)
print('=========== ====================== ====================== ===========')
"""
Explanation: Local elastic properties or stiffness
Local elastic properties can be calculated using either local base-step parameters or local helical base-step parameters.
In the case of base-step parameters, i.e. Shift ($Dx$), Slide ($Dy$), Rise ($Dz$), Tilt ($\tau$), Roll ($\rho$) and Twist ($\omega$), the following elastic matrix is calculated.
$$
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau,\rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau,\rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau,\omega} & K_{\rho,\omega} & K_{\omega}
\end{bmatrix}
$$
In the case of helical base-step parameters, i.e. x-displacement ($dx$), y-displacement ($dy$), h-rise ($h$), inclination ($\eta$), tip ($\theta$) and twist ($\Omega$), the following elastic matrix is calculated.
$$
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta,\theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta,\theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta,\Omega} & K_{\theta,\Omega} & K_{\Omega}
\end{bmatrix}
$$
End of explanation
"""
# Here calculate energy for one base-step
time, energy = eyDNA.getLocalDeformationEnergy([10,13], boundDNA, helical=False, which='all')
energyTerms=list(energy.keys())
print('Keys in returned dictionary:\n', '\n'.join(energyTerms), '\n-----------')
# Plot two energy terms
fig = plt.figure(figsize=(8,8))
fig.subplots_adjust(hspace=0.3)
ax1 = fig.add_subplot(211)
ax1.set_title('Bound DNA, entire elastic matrix')
ax1.plot(time, energy['full'])
ax1.set_xlabel('Time (ps)')
ax1.set_ylabel(r'Local Deformation Energy (kJ/mol)',)
ax2 = fig.add_subplot(212)
ax2.set_title('Bound DNA, only diagonal of elastic matrix')
ax2.plot(time, energy['diag'])
ax2.set_xlabel('Time (ps)')
ax2.set_ylabel(r'Local Deformation Energy (kJ/mol)',)
plt.show()
# Calculate average and error for each energy terms
error = dnaMD.get_error(time, list(energy.values()), len(energyTerms), err_type='block', tool='gmx analyze')
print("==============================================")
print('{0:<16}{1:>14}{2:>14}'.format('Energy(kJ/mol)', 'Average', 'Error'))
print("----------------------------------------------")
for i in range(len(energyTerms)):
print('{0:<16}{1:>14.3f}{2:>14.3f}'.format(energyTerms[i], np.mean(energy[energyTerms[i]]),error[i]))
print("==============================================\n")
"""
Explanation: Local deformation energy of a small local segment
Using the above elastic matrix, the deformation energy of this base-step in the bound DNA can be calculated.
End of explanation
"""
# First calculation for local base-step parameters
segments, energies, error = eyDNA.getLocalDeformationEnergySegments([4,20], boundDNA, span=4,
helical=False, which='all',
err_type='block',
tool='gmx analyze')
energyTerms=list(energies.keys())
print('Keys in returned dictionary:\n', '\n'.join(energyTerms), '\n-----------')
# Now plot the data
fig = plt.figure(figsize=(14,8))
fig.subplots_adjust(hspace=0.3)
mpl.rcParams.update({'font.size': 16})
xticks = range(len(segments))
ax1 = fig.add_subplot(111)
ax1.set_title('Local base-step parameters')
for term in energyTerms:
ax1.errorbar(xticks, energies[term], yerr=error[term], ms=10, elinewidth=3, fmt='-o', label=term)
ax1.set_xticks(xticks)
ax1.set_xticklabels(segments, rotation='vertical')
ax1.set_xlabel('base-step number')
ax1.set_ylabel(r'Deformation Energy (kJ/mol)',)
plt.legend()
plt.show()
"""
Explanation: Deformation energy of consecutive overlapping DNA segments
The above method gives the energy of a small local segment of the DNA. However, we are usually interested in a larger segment of the DNA. This larger segment can be divided into smaller local segments, and the local deformation energy can be calculated for each of them. Here these segments overlap with each other.
End of explanation
"""
# Second calculation, now for local helical base-step parameters
segments, energies, error = eyDNA.getLocalDeformationEnergySegments([4,20], boundDNA, span=4,
helical=True, which='all',
err_type='block',
tool='gmx analyze')
energyTerms=list(energies.keys())
print('Keys in returned dictionary:\n', '\n'.join(energyTerms), '\n-----------')
# Now plot the data
fig = plt.figure(figsize=(14,8))
fig.subplots_adjust(hspace=0.3)
mpl.rcParams.update({'font.size': 16})
xticks = range(len(segments))
ax1 = fig.add_subplot(111)
ax1.set_title('Local base-step parameters')
for term in energyTerms:
ax1.errorbar(xticks, energies[term], yerr=error[term], ms=10, elinewidth=3, fmt='-o', label=term)
ax1.set_xticks(xticks)
ax1.set_xticklabels(segments, rotation='vertical')
ax1.set_xlabel('base-step number')
ax1.set_ylabel(r'Deformation Energy (kJ/mol)',)
plt.legend()
plt.show()
"""
Explanation: Same as above, but the energy is calculated using helical base-step parameters.
End of explanation
"""
|
marco-olimpio/ufrn
|
EEC2006/5_FinalProject_Regression_KNN/.ipynb_checkpoints/ForestFirePredictionVictor-checkpoint.ipynb
|
gpl-3.0
|
Victor, I think we could cover this in the project:
https://machinelearningmastery.com/feature-selection-machine-learning-python/
but we can leave it for after we finish; it would be the cherry on top.
"""
Explanation: Forest Fire Span Prediction
Objectives:
This notebook aims to explore a dataset to which the KNN algorithm can be applied to predict something. This kind of problem is known as a regression problem; examples in this category include predicting stock prices or house prices based on a series of events.
The dataset:
The dataset chosen was the 'Forest Fire Data Set'. The aim of the data set is to predict the burned area of forest fires. We found the data set at http://archive.ics.uci.edu/ml/datasets/Forest+Fires and it is properly referenced in [1].
| Data Set Characteristics | Number of Instances | Area | Attribute Characteristics | Number of Attributes |Associated Tasks | Missing Values? |
|--------------------------|---------------------|----------|---------------------------|----------------------|------------------------|-----------------|
| Multivariate | 517 | Physical | Real | 13 | Regression | N/A
Data set abstract: This is a difficult regression task, where the aim is to predict the burned area of forest fires, in the northeast region of Portugal, by using meteorological and other data.
Participants:
Marco Olimpio - marco.olimpio at gmail
Rebecca Betwel - bekbetwel at gmail
Victor Hugo - victorhugo.automacao at gmail
Contents
Introduction
K-Nearest Neighbors - KNN
Feature Selection
Experiments
Results and comparisons
References
<a id='intro'></a>
1. Introduction
Data set features:
Structure of the FWI System
The diagram below illustrates the components of the FWI System. Calculation of the components is based on consecutive daily observations of temperature, relative humidity, wind speed, and 24-hour rainfall. The six standard components provide numeric ratings of relative potential for wildland fire.
<img src="fwi_structure.gif"/>
<br/>
<img src="park.png"/>
Location
X - X-axis spatial coordinate within the Montesinho park map: 1 to 9
Y - Y-axis spatial coordinate within the Montesinho park map: 2 to 9
Date
month - Month of the year: "jan" to "dec"
day - Day of the week: "mon" to "sun"
Fire Wheater Index - FWI
FFMC - Fine Fuel Moisture Code The Fine Fuel Moisture Code (FFMC) is a numeric rating of the moisture content of litter and other cured fine fuels. This code is an indicator of the relative ease of ignition and the flammability of fine fuel: 18.7 to 96.20
DMC - The Duff Moisture Code (DMC) is a numeric rating of the average moisture content of loosely compacted organic layers of moderate depth. This code gives an indication of fuel consumption in moderate duff layers and medium-size woody material.: 1.1 to 291.3
DC - The Drought Code (DC) is a numeric rating of the average moisture content of deep, compact organic layers. This code is a useful indicator of seasonal drought effects on forest fuels and the amount of smoldering in deep duff layers and large logs.: 7.9 to 860.6
ISI - The Initial Spread Index (ISI) is a numeric rating of the expected rate of fire spread. It combines the effects of wind and the FFMC on rate of spread without the influence of variable quantities of fuel.: 0.0 to 56.10
temp - Temperature: 2.2 to 33.30
RH - Relative Humidity in %: 15.0 to 100
wind - Wind speed in km/h: 0.40 to 9.40
rain - Outside rain in mm/m2 : 0.0 to 6.4
area - The burned area of the forest (in ha): 0.00 to 1090.84
<a id="knn"></a>
2. K-Nearest Neighbors - KNN
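As a minimal, self-contained sketch of how KNN regression works (synthetic data, purely illustrative, not the forest-fire dataset):
```python
import numpy as np
from sklearn.neighbors import KNeighborsRegressor

# Toy 1-D regression problem: y = 2x with a little noise
rng = np.random.RandomState(0)
X = np.arange(20).reshape(-1, 1)
y = 2 * X.ravel() + rng.normal(scale=0.5, size=20)

# Each prediction is the average target value of the k nearest training points
knn = KNeighborsRegressor(n_neighbors=3)
knn.fit(X, y)
print(knn.predict([[7.5]]))
```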
<a id='feature'></a>
3. Feature Selection
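A possible sketch of univariate feature selection with scikit-learn, in the spirit of the article linked at the top of this notebook (it assumes the firedb DataFrame loaded later in this notebook and uses its numeric columns):
```python
from sklearn.feature_selection import SelectKBest, f_regression

features = ['FFMC', 'DMC', 'DC', 'ISI', 'temp', 'RH', 'wind', 'rain']
X = firedb[features]
y = firedb['area']

# Keep the 4 features with the strongest univariate relationship to the target
selector = SelectKBest(score_func=f_regression, k=4).fit(X, y)
print(X.columns[selector.get_support()])
```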
End of explanation
"""
import math
#
import numpy as np
import pandas as pd
#
%matplotlib inline
import matplotlib.pyplot as plt
firedb = pd.read_csv("forestfires.csv")
firedb.columns
"""
Explanation: 3. Start - Loading, Checking and Adjusting Data Set
End of explanation
"""
firedb.info()
firedb['area'] = firedb['area'].astype(np.float32)
firedb[['FFMC','ISI','temp','RH','wind','rain']].plot(figsize=(17,15))
firedb['month'].value_counts()
firedb[['area']].plot(figsize=(17,15))
firedb['area_adjusted'] = np.log(firedb['area']+1)
"""
Explanation: No null values.
End of explanation
"""
fig, axes = plt.subplots(nrows=1, ncols=2)
firedb['area'].plot.hist(ax=axes[0],figsize=(17,8))
firedb['area_adjusted'].plot.hist(ax=axes[1])
"""
Explanation: <div style="background-color:red">Why the transformation?</div>
End of explanation
"""
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score, KFold
from sklearn import preprocessing
firedb.head()
hyper_params = range(1,20)
features_list = [ ['FFMC'], ['DMC'], ['DC'], ['ISI'], ['temp'], ['RH'], ['wind'], ['rain'],
['X', 'Y', 'FFMC', 'DMC','DC','ISI'], ['X', 'Y', 'temp','RH','wind','rain'],
['FFMC', 'DMC','DC','ISI'], ['temp','RH','wind','rain'],
['DMC', 'wind'], ['DC','RH','wind'],
['X', 'Y', 'DMC', 'wind'], ['X', 'Y', 'DC','RH','wind'],
['FFMC', 'DMC','DC','ISI', 'temp','RH','wind','rain'],
['X', 'Y', 'FFMC', 'DMC','DC','ISI', 'temp','RH','wind','rain'] ]
num_folds = [3, 5, 7, 9, 10, 11, 13, 15, 17, 19, 21, 23]
outputs = ['area', 'area_adjusted']
import csv
# initializing our file that will act like our database for the results
#open the file in the 'write' mode
file = open('results_db.csv','w')
writer = csv.writer(file)
db_row = ['HYPER_PARAM', 'FEATURES', 'K_FOLDS', 'OUTPUT',
'AVG_RMSE', 'AVG_RMSE_%_AREA', 'STD_RMSE', 'CV_RMSE',
'AVG_MAE', 'AVG_MAE_%_AREA', 'STD_MAE', 'CV_MAE']
#write the row mounted
writer.writerow(db_row)
#close the file
file.close()
from IPython.core.display import clear_output
from time import time
start_time = time()
k = 0
k_total = len(hyper_params) * len(features_list) * len(num_folds) * len(outputs)
for hp in hyper_params:
for features in features_list:
for fold in num_folds:
for output in outputs:
k += 1
kf = KFold(fold, shuffle=True, random_state=1)
model = KNeighborsRegressor(n_neighbors = hp, algorithm='auto')
mses = cross_val_score(model, firedb[features],
firedb[output], scoring="neg_mean_squared_error", cv=kf)
rmses = np.sqrt(np.absolute(mses))
avg_rmse = np.mean(rmses)
avg_rmse_per_area = avg_rmse / np.mean(firedb[output])
std_rmse = np.std(rmses)
cv_rmse = std_rmse / np.mean(firedb[output])
maes = cross_val_score(model, firedb[features],
firedb[output], scoring="neg_mean_absolute_error", cv=kf)
maes = np.absolute(maes)
avg_mae = np.mean(maes)
avg_mae_per_area = avg_mae / np.mean(firedb[output])
std_mae = np.std(maes)
cv_mae = std_mae / np.mean(firedb[output])
db_row = [ hp, ', '.join(features), fold, output,
avg_rmse, avg_rmse_per_area, std_rmse, cv_rmse,
avg_mae, avg_mae_per_area, std_mae, cv_mae ]
print('ITERATION %d OF %d' % (k, k_total) )
print( 'HP: ', hp )
print('FEATURES: ', ', '.join(features) )
print('FOLDS: ', fold)
print('OUTPUT: ', output)
print('AVG_RMSE: ', avg_rmse)
print('AVG_RMSE_PER_AREA: ', avg_rmse_per_area)
print('STD_RMSE: ', std_rmse)
print('CV_RMSE: ', cv_rmse)
print('AVG_MAE: ', avg_mae)
print('AVG_MAE_PER_AREA: ', avg_mae_per_area)
print('STD_MAE: ', std_mae)
print('CV_MAE: ', cv_mae)
print('\n\n')
#clear_output(wait = True)
#open the file that will act like a database in the 'append' mode
#which allow us to append a row each time we open it
file = open('results_db.csv','a')
writer = csv.writer(file)
#write the row mounted
writer.writerow(db_row)
#close the file
file.close()
end_time = time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time)
results = pd.read_csv('results_db_complete.csv')
results.head()
results[ results['OUTPUT'] == 'area' ].sort_values(by='AVG_RMSE')[:5]
results[ results['OUTPUT'] == 'area' ].sort_values(by='STD_RMSE')[:5]
results[ results['OUTPUT'] == 'area' ].sort_values(by='AVG_MAE')[:5]
results[ results['OUTPUT'] == 'area' ].sort_values(by='STD_MAE')[:5]
mean_weight = 0.5
std_weight = 1-mean_weight
results['SCORE_RMSE'] = mean_weight*results['AVG_RMSE'] + std_weight*results['STD_RMSE']
results['SCORE_RMSE'] = ( (np.absolute(results['SCORE_RMSE'] - results['SCORE_RMSE'].mean())) /
results['SCORE_RMSE'].std() )
results['SCORE_MAE'] = mean_weight*results['AVG_MAE'] + std_weight*results['STD_MAE']
results['SCORE_MAE'] = ( (np.absolute(results['SCORE_MAE'] - results['SCORE_MAE'].mean())) /
results['SCORE_MAE'].std() )
results.head()
results[ results['OUTPUT'] == 'area' ].sort_values(by='SCORE_RMSE')[:5]
results[ results['OUTPUT'] == 'area' ].sort_values(by='SCORE_MAE')[:5]
features_str = str(results[ results['OUTPUT'] == 'area' ].sort_values(by='SCORE_RMSE')[:1].iloc[0,1])
features = []
for feature in features_str.split(','):
features.append(feature.strip())
hp = int(results[ results['OUTPUT'] == 'area' ].sort_values(by='SCORE_RMSE')[:1]['HYPER_PARAM'])
fold = int(results[ results['OUTPUT'] == 'area' ].sort_values(by='SCORE_RMSE')[:1]['K_FOLDS'])
kf = KFold(fold, shuffle=True, random_state=1)
model = KNeighborsRegressor(n_neighbors = hp, algorithm='auto')
mses = cross_val_score(model, firedb[features],
firedb['area'], scoring="neg_mean_squared_error", cv=kf)
rmses = np.sqrt(np.absolute(mses))
avg_rmse = np.mean(rmses)
std_rmse = np.std(rmses)
print(avg_rmse,std_rmse)
rmses
predictions = model.predict(firedb[features])
predictions
hyper_params = range(1,10)
mse_values = []
mad_values = []
predictions = []
numFolds = 10
# 10-fold cross validation
#kf = KFold(n_splits=10)
le = preprocessing.LabelEncoder()
x = firedb.iloc[:, 1:10].values
Y = le.fit_transform(firedb.iloc[:, 10].values)
kf = KFold(numFolds, shuffle=True)
conv_X = pd.get_dummies(firedb.iloc[:, 1:10])
kf = KFold(n_splits=10, shuffle=True)
# Create the split generator once, so successive next() calls walk through the 10 folds
# (calling kf.split(firedb) anew each time would always return the first fold).
splits = kf.split(firedb)
for _ in range(10):
    result = next(splits, None)
    print(result)
    train = firedb.iloc[result[0]]
    test = firedb.iloc[result[1]]
features = ['FFMC', 'DMC', 'DC', 'ISI', 'temp', 'RH', 'wind', 'rain']  # numeric features of this dataset
for train_index, test_index in kf.split(firedb):
    train_df = firedb.iloc[train_index]
    test_df = firedb.iloc[test_index]
    for knumber in hyper_params:
        # Configuring the regressor
        knn = KNeighborsRegressor(n_neighbors=knumber, algorithm='brute', n_jobs=3)
        # Fitting the model on the training fold
        knn.fit(train_df[features], train_df['area'])
        # Predicting on the test fold
        predictions = knn.predict(test_df[features])
        # Tracking the mean squared and mean absolute errors
        mse_values.append(mean_squared_error(test_df['area'], predictions))
        mad_values.append(mean_absolute_error(test_df['area'], predictions))
plt.plot(mse_values)
plt.plot(mad_values)
"""
Explanation: <a id='pca'></a>
Principal Component Analysis
<a id='experiments'></a>
Experiments
End of explanation
"""
|
kkhenriquez/python-for-data-science
|
Week-8-NLP-Databases/Natural Language Processing of Movie Reviews using nltk .ipynb
|
mit
|
import nltk
nltk.download("movie_reviews")
nltk.download()
"""
Explanation: Natural Language Processing with nltk
nltk is the most popular Python package for Natural Language Processing: it provides algorithms for importing, cleaning, and pre-processing text data in human language, and for then applying computational linguistics algorithms like sentiment analysis.
Inspect the Movie Reviews Dataset
It also includes many easy-to-use datasets in the nltk.corpus package, we can download for example the movie_reviews package using the nltk.download function:
End of explanation
"""
from nltk.corpus import movie_reviews
"""
Explanation: You can also list and download other datasets interactively just typing:
nltk.download()
in the Jupyter Notebook.
Once the data have been downloaded, we can import them from nltk.corpus
End of explanation
"""
len(movie_reviews.fileids())
movie_reviews.fileids()[:5]
movie_reviews.fileids()[-5:]
"""
Explanation: The fileids method provided by all the datasets in nltk.corpus gives access to a list of all the files available.
In particular, the movie_reviews dataset contains 2000 text files, each of which is a movie review, and they are already split into a neg folder for the negative reviews and a pos folder for the positive reviews:
End of explanation
"""
negative_fileids = movie_reviews.fileids('neg')
positive_fileids = movie_reviews.fileids('pos')
len(negative_fileids), len(positive_fileids)
"""
Explanation: fileids can also filter the available files based on their category, which is the name of the subfolders they are located in. Therefore we can have lists of positive and negative reviews separately.
End of explanation
"""
print(movie_reviews.raw(fileids=positive_fileids[0]))
"""
Explanation: We can inspect one of the reviews using the raw method of movie_reviews. Each file is split into sentences, and the curators of this dataset also removed from each review any direct mention of the movie's rating.
End of explanation
"""
romeo_text = """Why then, O brawling love! O loving hate!
O any thing, of nothing first create!
O heavy lightness, serious vanity,
Misshapen chaos of well-seeming forms,
Feather of lead, bright smoke, cold fire, sick health,
Still-waking sleep, that is not what it is!
This love feel I, that feel no love in this."""
"""
Explanation: Tokenize Text into Words
End of explanation
"""
romeo_text.split()
"""
Explanation: The first step in Natural Language Processing is generally to split the text into words. This process might appear simple, but handling all the corner cases is very tedious; see, for example, all the issues with punctuation we would have to solve if we just started with a split on whitespace:
End of explanation
"""
nltk.download("punkt")
"""
Explanation: nltk has a sophisticated word tokenizer trained on English, named punkt; we first have to download its parameters:
End of explanation
"""
romeo_words = nltk.word_tokenize(romeo_text)
romeo_words
"""
Explanation: Then we can use the word_tokenize function to properly tokenize this text; compare it to the whitespace splitting we used above:
End of explanation
"""
movie_reviews.words(fileids=positive_fileids[0])
"""
Explanation: The good news is that the movie_reviews corpus already provides direct access to tokenized text with the words method:
End of explanation
"""
{word:True for word in romeo_words}
type(_)
def build_bag_of_words_features(words):
return {word:True for word in words}
build_bag_of_words_features(romeo_words)
"""
Explanation: Build a bag-of-words model
The simplest model for analyzing text is to treat the text as an unordered collection of words (bag-of-words). This generally allows us to infer the category, topic, or sentiment of the text.
From the bag-of-words model we can build features to be used by a classifier; here we assume that each word is a feature that can either be True or False.
We implement this in Python as a dictionary where we associate True with each word in a sentence; if a word is missing, that is the same as assigning False.
End of explanation
"""
nltk.download("stopwords")
import string
string.punctuation
"""
Explanation: This is what we wanted, but we notice that punctuation like "!" and words that are useless for classification purposes, like "of" or "that", are also included.
Those words are called "stopwords", and nltk has a convenient corpus we can download:
End of explanation
"""
useless_words = nltk.corpus.stopwords.words("english") + list(string.punctuation)
useless_words
#type(useless_words)
def build_bag_of_words_features_filtered(words):
return {
word:1 for word in words \
if not word in useless_words}
build_bag_of_words_features_filtered(romeo_words)
"""
Explanation: Using the Python string.punctuation list and the English stopwords we can build better features by filtering out those words that would not help in the classification:
End of explanation
"""
all_words = movie_reviews.words()
len(all_words)/1e6
"""
Explanation: Plotting Frequencies of Words
It is common to explore a dataset before starting the analysis. In this section we will find the most common words and plot their frequency.
Using the .words() function with no argument we can extract the words from the entire dataset and check that there are about 1.6 million of them.
End of explanation
"""
filtered_words = [word for word in movie_reviews.words() if not word in useless_words]
type(filtered_words)
len(filtered_words)/1e6
"""
Explanation: First we want to filter out useless_words as defined in the previous section; this will reduce the length of the dataset by more than a factor of 2:
End of explanation
"""
from collections import Counter
word_counter = Counter(filtered_words)
"""
Explanation: The collections package of the standard library contains a Counter class that is handy for counting frequencies of words in our list:
End of explanation
"""
most_common_words = word_counter.most_common()[:10]
most_common_words
"""
Explanation: It also has a most_common() method to access the words with the highest counts:
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Then we would like to have a visualization of this using matplotlib.
First we want to use the Jupyter magic function
%matplotlib inline
to set up the Notebook to show the plot embedded in the Jupyter Notebook page. You can also try:
%matplotlib notebook
for a more interactive plotting interface, which however is not as well supported on all platforms and browsers.
End of explanation
"""
sorted_word_counts = sorted(list(word_counter.values()), reverse=True)
plt.loglog(sorted_word_counts)
plt.ylabel("Freq")
plt.xlabel("Word Rank");
"""
Explanation: We can sort the word counts and plot their values on logarithmic axes to check the shape of the distribution. This visualization is particularly useful when comparing 2 or more datasets: a flatter distribution indicates a large vocabulary, while a peaked distribution indicates a restricted vocabulary, often due to a focused topic or specialized language.
End of explanation
"""
plt.hist(sorted_word_counts, bins=50);
plt.hist(sorted_word_counts, bins=50, log=True);
"""
Explanation: Another related plot is the histogram of sorted_word_counts, which displays how many words have a count in a specific range.
Of course the distribution is highly peaked at low counts, i.e. most of the words appear with a low count, so we had better display it on semilogarithmic axes to inspect the tail of the distribution.
End of explanation
"""
negative_features = [
(build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'neg') \
for f in negative_fileids
]
print(negative_features[3])
positive_features = [
(build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'pos') \
for f in positive_fileids
]
print(positive_features[6])
from nltk.classify import NaiveBayesClassifier
"""
Explanation: Train a Classifier for Sentiment Analysis
Using our build_bag_of_words_features_filtered function we can build the negative and positive features separately.
Basically, for each of the 1000 negative and 1000 positive reviews, we create one dictionary of its words and associate the label "neg" or "pos" with it.
End of explanation
"""
split = 800
sentiment_classifier = NaiveBayesClassifier.train(positive_features[:split]+negative_features[:split])
"""
Explanation: One of the simplest supervised machine learning classifiers is the Naive Bayes Classifier; it can be trained on 80% of the data to learn which words are generally associated with positive or with negative reviews.
End of explanation
"""
nltk.classify.util.accuracy(sentiment_classifier, positive_features[:split]+negative_features[:split])*100
"""
Explanation: After training, we can check the accuracy on the training set, i.e. the same data used for training. We expect this to be a very high number because the algorithm has already "seen" those data. Accuracy is the fraction of the data that is classified correctly; we can turn it into a percentage:
End of explanation
"""
nltk.classify.util.accuracy(sentiment_classifier, positive_features[split:]+negative_features[split:])*100
"""
Explanation: The accuracy above is mostly a check that nothing went very wrong in the training. The real measure of accuracy is on the remaining 20% of the data that wasn't used in training, the test data:
End of explanation
"""
sentiment_classifier.show_most_informative_features()
"""
Explanation: Accuracy here is around 70%, which is pretty good for such a simple model, considering that the estimated accuracy for a person is about 80%.
We can finally print the most informative features, i.e. the words that most strongly identify a positive or a negative review:
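As a follow-up sketch (the review text below is made up for illustration), the trained classifier can also score a brand-new review built with the same bag-of-words features:
```python
review = "A wonderful film with a brilliant cast and a genuinely moving story."
review_features = build_bag_of_words_features_filtered(nltk.word_tokenize(review))
print(sentiment_classifier.classify(review_features))
```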
End of explanation
"""
|
PythonFreeCourse/Notebooks
|
week03/1_While_Loops.ipynb
|
mit
|
current_number = 2
while current_number <= 16:
twice_number = current_number + current_number
print(f"{current_number} and {current_number} are {twice_number}")
current_number = twice_number
"""
Explanation: <img src="images/logo.jpg" style="display: block; margin-left: auto; margin-right: auto;" alt="לוגו של מיזם לימוד הפייתון. נחש מצויר בצבעי צהוב וכחול, הנע בין האותיות של שם הקורס: לומדים פייתון. הסלוגן המופיע מעל לשם הקורס הוא מיזם חינמי ללימוד תכנות בעברית.">
<p style="text-align: right; direction: rtl; float: right;">לולאות</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">הקדמה</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
זהו בוקר אפלולי וגשום של יום ראשון. השמיים אפורים ואתם במיטה מתחת לפוך המפנק שלכם, מתפללים שאתם עדיין בתוך החלום המתוק ההוא.<br>
השעה היא 7:30. השעון המעורר מנגן שוב את השיר שפעם היה האהוב עליכם, והיום מעלה בכם אסוציאציות קשות שמערבות את הטלפון החכם שלכם ופטיש כבד מאוד.<br>
הפגישה שלכם תתקיים בשעה 9:00, ואתם יודעים בוודאות שתספיקו להגיע בזמן אם תתעוררו בשעה 8:00 ותמהרו מאוד.<br>
היד מושטת לכפתור ה"נודניק" שיפעיל שוב את השעון המעורר שלכם בעוד 10 דקות. ועוד פעם. ושוב.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אם נתאר את האלגוריתם שלפיו פעלתם, נוכל להגיד כך:<br>
<q>כל עוד השעון מצלצל, והשעה היא לפני 8:00, לחץ על כפתור הנודניק בשעון</q>.<br>
נצייר את דרך הפעולה שלכם:
</p>
<figure>
<img src="images/while-flow.svg?v=4" width="400px" alt="התרשים מתחיל ב'הלכת לישון'. לאחר מכן הוא בודק האם השעון מצלצל וגם השעה היא לפני 8:00.">
<figcaption style="text-align: center; direction: rtl; clear: both;">
תרשים הזרימה של שעון מעורר בבוקר מדכא.
</figcaption>
</figure>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
<strong>תרגול</strong>:
נסו לשרטט אלגוריתם להטלת 6 בקובייה.<br>
כל עוד לא התקבל 6 בקובייה, הטילו את הקובייה מחדש.<br>
כשקיבלתם 6 בקובייה, פִּצְחוּ ב<a href="https://www.youtube.com/watch?v=Md7OvU5JIcI">צהלולים</a> בקול גדול.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right; clear: both;">הגדרה</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
המבנה ששרטטנו זה עתה נקרא לולאה.<br>
נשתמש בלולאה כשנרצה לחזור על פעולה מספר פעמים שלאו דווקא ידוע לנו מראש.<br>
אם ננסה לנתח את הלולאה כרעיון, נגלה שכל לולאה מורכבת מ־4 חלקים מרכזיים:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li><em>אתחול הסביבה</em> – הגדרת ערכים שבהם נשתמש בלולאה.</li>
<li><em>תנאי הלולאה</em> – ביטוי בוליאני. כל עוד הביטוי הזה שקול ל־<code>True</code>, גוף הלולאה ימשיך להתבצע.</li>
<li><em>גוף הלולאה</em> – הפעולות שיקרו בכל פעם שתנאי הלולאה מתקיים.</li>
<li><em>צעד לקראת סוף הלולאה</em> – הליך שמבטיח שהלולאה תסתיים.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במקרה שלנו:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>שלב <em style="color: #107896">אתחול הסביבה</em> כולל:
<ol>
<li><em style="color: #829456;">הגדרת הצלצול הראשוני ל־7:30</em>.</li>
<li><em style="color: #829456;">הגדרת שעת ההשכמה הסופית ל־8:00</em>.</li>
<li><em style="color: #829456;">הגדרת ה"נודניק" לצלצל בעוד 10 דקות</em>.</li>
</ol>
</li>
<li><em style="color: #107896">תנאי הלולאה</em> יכול להיות <em style="color: #829456;">השעון המעורר פועל וגם השעה היא לפני שעת ההשכמה הסופית</em>.</li>
<li><em style="color: #107896">גוף הלולאה</em> יכול להיות <em style="color: #829456;">לחיצה על הנודניק</em> ו<em style="color: #829456;">חזרה לישון</em>.</li>
<li><em style="color: #107896">צעד לקראת סוף הלולאה</em> הוא <em style="color: #829456;">הזמן שעבר בין צלצול לצלצול, שמקרב אותנו לשעה 8:00</em>.</li>
</ol>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נסו לחשוב על דוגמה משלכם ללולאה כלשהי. פרקו אותה ל־4 הרעיונות המרכזיים שבהם דנו.
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">דוגמאות</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
קל לחשוב על דוגמאות ללולאות מחיי היום־יום.<br>
כמעט בכל פעם שאנחנו אומרים "עד ש־" או "כל עוד־", אנחנו בונים לולאה במציאות.<br>
בכל פעם שאנחנו חוזרים על משהו שוב ושוב, אנחנו פועלים לפי לולאה מסוימת שמניעה אותנו.<br>
רוב חיי היום־יום שלנו מתנהלים בלולאות, וזו הסיבה שלולאות הן כלי חזק כל כך בתכנות.<br>
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפניכם כמה דוגמאות ללולאות:
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li>בזמן הארוחה, המשך לאכול עד שאתה שבע.</li>
<li>התעורר מהשינה! כל עוד אתה עירני, המשך ללמוד פייתון.</li>
<li>פתח את הספר בעמוד הראשון. כל עוד לא קראת את כל העמודים בספר, קרא את העמוד הנוכחי ואז עבור לעמוד הבא.</li>
<li>כל עוד לא מצאת אישה שנקראת הרמיוני, גש למישהי אקראית בעלת שיער חום מתולתל ועיניים חומות שנמצאת על הרציף ושאל אותה אם היא הרמיוני.</li>
<li>בליל כל הקדושים, התחפש, צא החוצה, וכל עוד לא ביקרת בכל הבתים ברחוב: לך לבית, צלצל בפעמון, אמור "ממתק או תעלול", קח ממתק, אמור תודה ולך לבית הבא.</li>
</ul>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
<strong>תרגול</strong>:
מצאו את 4 הרעיונות המרכזיים שעליהם דיברנו בכל אחת מהדוגמאות.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">כתיבת לולאה</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
בסרט המוזיקלי <cite>Hans Christian Andersen</cite> מ־1952, מופיע השיר <cite><a href="https://www.youtube.com/watch?v=fXi3bjKowJU&feature=youtu.be&t=96">Inchworm</a></cite>, ושרים בו כך:
</p>
<blockquote>
Two and two are four<br>
Four and four are eight<br>
Eight and eight are sixteen<br>
Sixteen and sixteen are thirty-two<br>
</blockquote>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
זה הזמן לכתוב Inchworm משלנו.<br>
נשרטט איך כתיבת לולאה עבור שיר שכזה תיראה:
</p>
<figure>
<img src="images/while-song.svg?v=4" width="550px" alt="מתחילים את השיר, ואז עוברים לתא שמגדיר את 'המשתנה הנוכחי' כ־2. אחר כך עוברים לתא שבו כתוב 'האם המשתנה הנוכחי קטן או שווה ל־16?'. התשובה לא מובילה לסיום התוכנית, והתשובה כן מובילה לשמירה של המשתנה הנוכחי ועוד המשתנה הנוכחי על משתנה שנקרא 'סכום המספרים'. התא הבא אליו מגיעים מפה במקרה של כן הוא הדפסת שורה בשיר, שהיא למעשה 'המשתנה הנוכחי ועוד המשתנה הנוכחי הם סכום המספרים'. התא הבא מופיע בשרשרת הוא התא שהופך את הערך של המשתנה הנוכחי להיות הערך של סכום המספרים. התא הבא חוזר לתנאי שממנו התחלנו.">
<figcaption style="text-align: center; direction: rtl; clear: both;">
תרשים זרימה שמתאר את התוכנית שנבנה להדפסת השיר Inchworm.
</figcaption>
</figure>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
במקרה שלנו:
</p>
<ol style="text-align: right; direction: rtl; float: right; clear: both;">
<li>שלב <em style="color: #107896">אתחול הסביבה</em> הוא השלב שבו <em style="color: #829456;">נגדיר משתנה עם הערך הראשון להדפסה: 2</em>.</li>
<li><em style="color: #107896">תנאי הלולאה</em> בודק אם אנחנו <em style="color: #829456;">בשורה האחרונה של השיר או לפניה</em>.</li>
<li><em style="color: #107896">גוף הלולאה</em> כולל:
<ol>
<li><em style="color: #829456;">הגדרת משתנה שמכיל את סכום שני המספרים</em>.</li>
<li><em style="color: #829456;">הדפסת שורה בשיר לפי הערך שחישבנו</em>.</li>
</ol>
</li>
<li><em style="color: #107896">הצעד לקראת סוף הלולאה</em> במקרה שלנו הוא <em style="color: #829456;">קידום המשתנה שמכיל את המספר הנוכחי</em>.</li>
</ol>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
טוב, זה הזמן לקצת קוד, לא?
</p>
End of explanation
"""
current_number = 2
"""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
הקוד הזה משתמש במילת המפתח <code>while</code> כדי ליצור לולאה.<br>
הלולאה תואמת לחלוטין את תיאור השלבים המילולי שמופיע מעליה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ננסה להיזכר איך נראית השורה הראשונה של השיר, וננסה להבין מה הקוד שנכתב למעלה אומר.
</p>
<q>2 and 2 are 4</q>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
דבר ראשון, עלינו להכין את הסביבה לפני השימוש בלולאה.<br>
נשתמש במשתנה שישמור עבורנו את הערך המספרי שיעמוד בראש השורה הנוכחית:
</p>
End of explanation
"""
while current_number <= 16:
"""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
המספר שפותח את השורה האחרונה בשיר הוא 16, ולכן נרצה שהלולאה תרוץ כל עוד המספר הנוכחי ששמרנו קטן מ־16 או שווה לו.<br>
נרשום את מילת המפתח <code>while</code>, ואחריה ביטוי בוליאני שיקבע מתי גוף הלולאה ירוץ. נסיים בנקודתיים.<br>
בכל פעם שהביטוי הבוליאני יהיה שווה ל־<code>True</code>, גוף הלולאה ירוץ.<br>
בפעם הראשונה (והיחידה) שהביטוי הבוליאני יהיה שקול ל־<code>False</code>, גוף הלולאה לא יתבצע והתוכנית תמשיך לבצע את הקוד שנמצא אחרי הלולאה.<br>
</p>
End of explanation
"""
twice_number = current_number + current_number
"""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
אחרי שכתבנו את התנאי, זה הזמן לכתוב מה אנחנו רוצים שיתבצע בכל פעם שהתנאי יתקיים.<br>
החלק הזה נקרא "גוף הלולאה", וכל הרצה שלו נקראת "<dfn>אִיטֶרַצְיָה</dfn>", או בעברית, "<dfn>חִזְרוּר</dfn>".
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נתחיל בהגדרת המספר שיודפס בסוף השורה, שהוא המספר בתחילת השורה ועוד עצמו.<br>
שימו לב להזחה, שמציינת שחלק הקוד הזה שייך ללולאת ה־<code>while</code> ושהוא ירוץ בכל פעם שהביטוי הבוליאני שבראשה שקול ל־<code>True</code>.
</p>
End of explanation
"""
print(f"{current_number} and {current_number} are {twice_number}")
"""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
נדפיס את השורה עם הפרטים שיצרנו:
</p>
End of explanation
"""
current_number = twice_number
"""
Explanation: <p style="text-align: right; direction: rtl; float: right; clear: both;">
לסיום, לקראת הדפסת השורה הבאה, נקדם את המשתנה שמכיל את הערך שמודפס בתחילת כל שורה בשיר.<br>
הפעולה הזו תכין את המשתנה לשורה הבאה, וגם תקדם את הלולאה לסופה.<br>
כיוון שתחילת כל שורה חדשה בשיר זהה לסוף השורה הקודמת, נוכל לרשום בפשטות:
</p>
End of explanation
"""
def sum_positive_numbers(max_number):
total = 0
first_number = 1
while first_number <= max_number:
total = total + first_number
first_number = first_number + 1
return total
user_number = int(input("Please enter a number: "))
print(sum_positive_numbers(user_number))
"""
Explanation: <p style="text-align: right; direction: rtl; float: right;">סיכום ביניים</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כפי שראינו, לולאת <code>while</code> משתמשת בביטוי בוליאני כדי להחליט אם להריץ קוד מסוים.<br>
היא בודקת אם הביטוי הבוליאני שקול ל־<code>True</code>, ואם כן, היא מריצה את קטע הקוד בגוף הלולאה.<br>
כל עוד הביטוי הבוליאני המופיע ליד המילה <code>while</code> שקול ל־<code>True</code>, גוף הלולאה ימשיך לרוץ.<br>
כשהביטוי יהפוך להיות שקול ל־<code>False</code>, הלולאה תפסיק את ריצת הקוד בגוף הלולאה, והקוד שאחריה ימשיך לרוץ.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הרעיון של <code>while</code> מקביל ל־<code>if</code> שגופו רץ וחוזר לראש התנאי פעם אחר פעם, עד שהביטוי הבוליאני שבראש התנאי שקול ל־<code>False</code>.
</p>
<div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
כתבו קוד שמקבל מהמשתמש מספר שלם גדול מ־1, ומדפיס את כל המספרים מ־1 ועד המספר שנקלט.
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">דוגמאות נוספות</p>
<p style="text-align: right; direction: rtl; float: right;">תרגילים חשבוניים</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפניכם דוגמה של קוד שמחשב את סכום כל המספרים הטבעיים, מ־1 ועד למספר שהזין המשתמש.
</p>
End of explanation
"""
def get_grades(number_of_grades):
grades = []
while len(grades) < number_of_grades:
current_grade = int(input("Please enter a student grade: "))
grades = grades + [current_grade]
return grades
def get_highest_grade(grades):
highest_grade = grades[0]
current_grade_index = 1
while current_grade_index < len(grades):
if grades[current_grade_index] > highest_grade:
highest_grade = grades[current_grade_index]
current_grade_index = current_grade_index + 1
return highest_grade
number_of_grades = int(input("How many students are there?: "))
grades = get_grades(number_of_grades)
highest_grade = get_highest_grade(grades)
print(f"The highest grade is {highest_grade}")
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הגדירו את 4 החלקים המופיעים בלולאה המופיעה בקוד מעלה.<br>
שרטטו כיצד היא עובדת.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">מיקומים ברשימה</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
לפניכם קוד שמקבל כקלט את מספר התלמידים בכיתה.<br>
לאחר מכן, הוא מקבל כקלט את הציון של כל תלמיד במבחן האחרון.<br>
לבסוף, הקוד מחזיר את הציון הגבוה ביותר בכיתה.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
שימו לב לשימוש שנעשה כאן בלולאות כדי לגשת למיקומי ערכים ברשימה.
</p>
End of explanation
"""
i = 1
j = 1
while i <= 10:
line = ''
while j <= 10:
line = line + str(i * j) + '\t'
j = j + 1
print(line)
j = 1
i = i + 1
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
חשבו על דרך לממש את הקוד הזה עם לולאה אחת בלבד.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right;">לולאה בתוך לולאה</p>
End of explanation
"""
i = 1
while i < 10:
print(i)
print("End of the program")
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 70%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הסבירו לעצמכם כיצד הקוד הזה עובד.<br>
במידת הצורך, הזינו את הקוד ב־<a href="http://pythontutor.com/visualize.html#mode=edit">PythonTutor</a> כדי לראות מה הוא עושה.
</p>
</div>
<div style="display: flex; width: 20%; border-right: 0.1rem solid #A5A5A5; padding: 1rem 2rem;">
<p style="text-align: center; direction: rtl; justify-content: center; align-items: center; clear: both;">
<strong>חשוב!</strong><br>
פתרו לפני שתמשיכו!
</p>
</div>
</div>
<span style="text-align: right; direction: rtl; float: right; clear: both;">ניפוי שגיאות</span>
<p style="text-align: right; direction: rtl; float: right;">לולאה אין־סופית</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אם אתם חדשים בנושא הלולאות והתנסיתם בפתירת התרגילים, יש סיכוי לא רע שנתקעה לכם המחברת.<br>
כשאנחנו מתעסקים עם לולאת <code>while</code>, יש סיכון ממשי שניצור בטעות לולאה שלא תסתיים לעולם.<br>
המצב שבו לולאה לא מסתיימת נקרא "<dfn>לולאה אין־סופית</dfn>", והוא נובע מכך שתנאי הלולאה שקול תמיד ל־<code>True</code>.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
ישנן שתי טעויות נפוצות שגורמות ללולאות להיות אין־סופיות.<br>
זיהוי הטעות וטיפול בה יאפשרו ללולאה שלכם לעבוד כראוי:
</p>
<ul style="text-align: right; direction: rtl; float: right; clear: both;">
<li><em>טעות בתנאי</em> – אין מצב שבו הביטוי הבוליאני בראש הלולאה יהיה שקול ל־<code>False</code>.</li>
<li><em>טעות בקידום</em> – לא בוצע צעד שיקדם את הלולאה לכיוון הסוף שלה, ועקב כך הביטוי הבוליאני נשאר שקול ל־<code>True</code>.</li>
</ul>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
אם ליד תא במחברת שלכם מופיעה כוכבית ותאים אחרים לא יכולים לרוץ, סימן שאותו תא עדיין רץ.<br>
אם הוא רץ זמן רב מדי, יש סיכוי שמדובר בלולאה אין־סופית. אם זה אכן המצב, בחרו בסרגל הכלים של המחברת ב־"Kernel" ואז ב־"Restart".<br>
פעולה זו תעצור את הריצה של המחברת שלכם, ותאפשר לכם לתקן את הקוד הבעייתי ולהריץ אותו מחדש.
</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
הנה דוגמה ללולאה אין־סופית, בתוכנה שמטרתה לספור מ־1 עד 10:
</p>
End of explanation
"""
i = 8
while i <= 0:
print(i)
print("End of the program")
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
למה הלולאה הזו אין־סופית? תקנו אותה כך שתפעל כראוי.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right; clear: both;">הלולאה לא רצה</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
המקרה ההפוך מלולאה אין־סופית הוא לולאה שפשוט לא רצה.<br>
במילים אחרות – לולאה שתנאי הכניסה שלה שקול ל־<code>False</code> בהרצתה הראשונה.<br>
במקרה הזה, ראוי לבדוק כיצד אתחלנו את הסביבה, ואם התנאי שכתבנו אכן עובד.
</p>
End of explanation
"""
numbers = [1, 2, 3, 4]
index = 0
total = 0
while index <= len(numbers):
total = total + numbers[index]
index = index + 1
print(total)
"""
Explanation: <div class="align-center" style="display: flex; text-align: right; direction: rtl; clear: both;">
<div style="display: flex; width: 10%; float: right; clear: both;">
<img src="images/exercise.svg" style="height: 50px !important;" alt="תרגול">
</div>
<div style="width: 90%">
<p style="text-align: right; direction: rtl; float: right; clear: both;">
את הלולאה הזו כתב מתכנת מפוזר במיוחד, ויש בה יותר מבעיה אחת.<br>
מצאו את הבעיות, תקנו אותן והריצו את התוכנית.<br>
הפלט הרצוי, משמאל לימין, הוא: <samp dir="ltr" style="direction: ltr">8, 4, 2, 1</samp>.
</p>
</div>
</div>
<p style="text-align: right; direction: rtl; float: right; clear: both;">סטייה באחד</p>
<p style="text-align: right; direction: rtl; float: right; clear: both;">
זאת טעות נפוצה מאוד, עד כדי כך שיש לה שם ואפילו <a href="https://en.wikipedia.org/wiki/Off-by-one_error">ערך בוויקיפדיה</a>!<br>
בשגיאה מסוג "<dfn>סטייה באחד</dfn>" (באנגלית: "<dfn>Off By One</dfn>") מתכנת שוכח לטפל במקרה האחרון, או מטפל במקרה אחד יותר מדי.
</p>
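As a quick illustration of the safe pattern (a sketch only): when walking over the positions of a list, the condition should use `<` rather than `<=`, so that every valid position is handled exactly once.

```python
numbers = [10, 20, 30]
index = 0
while index < len(numbers):   # < and not <=, so index is always a valid position
    print(numbers[index])
    index = index + 1
```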
<p style="text-align: right; direction: rtl; float: right; clear: both;">
נראה דוגמה:
</p>
End of explanation
"""
Repository: feststelltaste/software-analytics | Path: courses/20190918_Uni_Leipzig/Data Science On Software Data (Presentation).ipynb | License: gpl-3.0
pd.read_csv("../datasets/google_trends_datascience.csv", index_col=0).plot();
"""
Explanation: Data Science on <br/> Software Data
<b>Markus Harrer</b>, Software Development Analyst
@feststelltaste
<small>Visual Software Analytics Summer School, 18 September 2019</small>
<img src="../../demos/resources/innoq_logo.jpg" width=20% height="20%" align="right"/>
About Me
In the past
Bachelor student
Researcher
Software developer
Master student*
Master's degree candidate*
Application developer
*and househusband
Now
<img src="../../demos/resources/about_me.png" style="width:85%;" >
My Motivation for Data Analysis in Software Development
The current problem in the industry
<img src="../../demos/resources/kombar0_en.png" style="width:95%;" align="center"/>
The current problem in the industry
<img src="../../demos/resources/kombar4_en.png" style="width:95%;" align="center"/>
"Software Analytics" to the rescue?
Definition Software Analytics
"Software Analytics is analytics on <b>software data</b> for managers and <b class="green">software engineers</b> with the aim of empowering software development individuals and teams to <i>gain and share insight from their data</i> to <b>make better decisions</b>."
<br/>
<div align="right"><small>Tim Menzies and Thomas Zimmermann</small></div>
Which kind of Software Data do we have?
static
runtime
chronological
Community
<b>=> a great variety!</b>
My problem with classic Software Analytics
<img src="../../demos/resources/freq1_en.png" style="width:80%;" align="center"/>
My problem with classic Software Analytics
<img src="../../demos/resources/freq2_en.png" style="width:80%;" align="center"/>
My problem with classic Software Analytics
<img src="../../demos/resources/freq3_en.png" style="width:80%;" align="center"/>
My problem with classic Software Analytics
<img src="../../demos/resources/freq4_en.png" style="width:80%;" align="center"/>
My problem with classic Software Analytics
<img src="../../demos/resources/freq5_en.png" style="width:80%;" align="center"/>
Some analysis tasks from practice
Communicating negative performance implications of complex data models
Spotting concurrency problems in custom-built frameworks
Identifying performance bottlenecks across different software systems
Making lost knowledge visible due to turnover
Analyzing the health of an open source community
"It depends" aka "context matters!"
<div align="center">
<img src="../../demos/resources/context.png" style="width:70%;" /></div>
<b>Individual systems == individual problems => individual analyses => individual insights!</b>
Others see that problem, too
Thomas Zimmermann in "One size does not fit all":
<br/><br/>
<div style="font-size:70%;" align="center">
"The main lesson: There is no one size fits all model. Even if you find models that work for most, they will not work for everyone. There is much <strong>academic research</strong> into <strong>general models</strong>. In contrast, <b><span class="green">industrial practitioners</span></b> are often fine with <b><span class="green">models that just work for their data</span></b> if the model provides some insight or allows them to work more efficiently."<br/><br/></div>
But: "... the methods typically are applicable on different datasets." <b>=> we see what's possible!</b>
<br/><br/><div align="center"><h1><b><strong>Data Science</strong> on <b><span class="green">Software Data</span></b>:<br/><br/> A Lightweight Implementation of <b><span class="blue">Software Analytics</span></b></h1></div>
Data Science
What is Data Science?
"Statistics on a <b><span class="green">Mac</span></b>."
<br/>
<br/>
<div align="right"><small>https://twitter.com/cdixon/status/428914681911070720</small></div>
<b>Data Science Venn Diagram (Drew Conway)</b>
<img src="../../demos/resources/venn_diagram.png" style="width:50%;" >
My Definition
What does "data" mean for me?
"Without data you‘re just another person with an opinion."
<br/>
<div align="right"><small>W. Edwards Deming</small></div>
<b>=> Delivering credible insights based on <span class="green">facts</span>.</b>
What does "science" mean for me?
"The aim of science is to seek the simplest explanations of complex facts."
<br/>
<div align="right"><small>Albert Einstein</small></div>
<b>=> Working out insights in a <span class="green">comprehensible</span> way.</b>
Why Data Science at all?
High demand in data analytics
<img src="../../demos/resources/data_scientist_sexy.png" style="width: 80%;"/>
Young job positions are paid well...
Data from Stack Overflow Developer Survey 2019
<img src="../../demos/resources/stackoverflow_salary_devtype-1.svg" style="width: 65%;"/>
... but also demanding?
Data from Stack Overflow Developer Survey 2019
<b>"Who's Actively Looking for a Job?" (Top 5)</b>
<img src="../../demos/resources/stackoverflow_on_job_search.png" style="width: 100%;"/>
Big and supportive community
Free online courses, videos and tutorials (e. g. DataCamp with > 4.6M members)
Online communities that help each other (e. g. Stack Overflow)
Online competitions to improve own skills (e. g. Kaggle)
Free and easy to use tools!
"R is for statisticians who want to program, Python is for developers who want to do statistics."
<img src="../../demos/resources/r_vs_python_pandas.png" style="width: 57%;"/>
Data Science popularity is still growing!
End of explanation
"""
import pandas as pd
log = pd.read_csv("../datasets/git_log_intellij.csv.gz")
log.head()
"""
Explanation: "100" == max. popularity!
How far away are <span class="green">Software Engineers</span></b> from <strong>Data Science</strong>?
What is a Data Scientist?
"A data scientist is someone who<br/>
is better at statistics<br/>
than any <b><span class="green">software engineer</span></b><br/>
and better at <b><span class="green">software engineering</span></b><br/>
than any statistician."
<br/>
<br/>
<div align="right"><small>From https://twitter.com/cdixon/status/428914681911070720</small></div>
<b>Not so far away as you may have thought!</b>
How to Get Started?
Reuse a Proven Approach (~ scientific method)
<small>Roger Peng's "Stages of Data Analysis"</small><br/>
I. Stating Question
II. Exploratory Data Analysis
III. Formal Modeling
IV. Interpretation
V. Communication
<b>=> from a <strong>question</strong> over <span class="green">data</span> to <span class="blue" style="background-color: #FFFF00">insights</span>!</b>
Be Aware of the "Seven principles
...of inductive software engineering" (Tim Menzies)
1. Humans before algorithms
1. Plan for Scale
1. Get Early Feedback
1. Be Open Minded
1. Be Smart with Your Learning
1. Live with the Data You Have
1. Develop a Broad Skill Set That Uses a Big Toolkit
Use Literate Statistical Programming
(Intent + Code + Data + Results)<br />
* Logical Step<br />
+ Automation<br />
= Literate Statistical Programming
Approach: Computational notebooks
Computational Notebook Example
<br/>
<div align="center"><img src="../../demos/resources/notebook_approach.jpg"></div>
Use Standard Data Science Tools
One of the more popular tech stacks
Jupyter Notebook
Python 3
pandas
matplotlib
Jupyter Notebook
Interactive Notebook
* Document-based analyses
* Executable Code
* Displaying results immediately
* Everything in one place
* Every step to the solution visible
<b><span class="green">=> Working out results in a comprehensible way!</span></b>
Python 3
Best programming language for Data Science!
* Easy
* Effective
* Fast
* Fun
* Automation
<b><span class="green">=> Data Analysis becomes repeatable</span></b>
pandas
Pragmatic data analysis framework
* Tabular data structures ("programmable Excel sheet")
* Really fast
* Flexible
* Expressive
<b><span class="green">=> Good integration point for your data sources!</span></b>
matplotlib
Programmable visualization library
Programmatic creation of graphics
Plots line charts, bar charts, pie charts and much more
Integrated into pandas
<b><span class="green">=> Direct visualization of results in Jupyter Notebooks</span></b>
The Python ecosystem
<br/>
<div class="row">
<div class="column">
<b>Data Analysis</b>
<ul>
<li>NumPy</li>
<li>scikit-learn</li>
<li>TensorFlow</li>
<li>SciPy</li>
<li>PySpark</li>
<li>py2neo</li>
</ul>
</div>
<div class="column">
<b>Visualization and more</b>
<ul>
<li>pygal</li>
<li>Bokeh</li>
<li>python-pptx</li>
<li>RISE</li>
<li>Requests, xmldataset, Selenium, Flask...</li>
</ul>
</div>
</div>
<b><span class="green">=> Provides the flexibility that is needed in specific situations</span></b>
Other Technologies
Jupyter Notebook works also with other technological platforms e. g.
* jQAssistant software scanner / Neo4j graph database
* JVM-based languages via beakerx / Tablesaw
* bash
<b><span class="green">=> If you want to use special technology, you can!</span></b>
Anaconda 3
Data Science Python Distribution
Free all-inclusive package
Brings everything you need to get started
Optimized for running fast on your operating system
<b><span class="green">=> Download, install, ready, go!</span></b>
My Recommendations for an easy start
My TOP 5's*
https://www.feststelltaste.de/category/top5/
Courses, videos, blogs, books and more...
<small>*some pages are still under development</small>
My Book Recommendations
Adam Tornhill: Software Design X-Ray
Wes McKinney: Python For Data Analysis
Jeff Leek: The Elements of Data Analytic Style
Tim Menzies, Laurie Williams, Thomas Zimmermann: Perspectives on Data Science for Software Engineering
Hands-On
Programming Demo
Case Study
IntelliJ IDEA
IDE for Java developers
Almost entirely written in Java
Big and long-living project
I. Stating Question (1/3)
Write down your question explicitly
Explain analysis idea comprehensibly
I. Stating Question (2/3)
<b>Question</b>
* Which code is complex and did change often lately?
I. Stating Question (3/3)
Implementation Idea
Tools: Jupyter, Python, pandas, matplotlib
Heuristics:
"complex": many lines of code
"change often": number of Git commits
"lately": last 30 days
Meta goal: Get to know the basic mechanics of the stack.
II. Exploratory Data analysis
Load and explore possible data sets
Clean up and filter the raw data
We load Git log dataset extracted from a Git repository.
End of explanation
"""
log.info()
"""
Explanation: We explore some basic key elements of the dataset
End of explanation
"""
log['timestamp'] = pd.to_datetime(log['timestamp'])
log.head()
"""
Explanation: <b>1</b> DataFrame (~ programmable Excel worksheet), <b>6</b> Series (= columns), <b>1128819</b> rows (= entries)
We convert the text with a time to a real timestamp object.
End of explanation
"""
# use log['timestamp'].max() instead of pd.Timedelta('today') to avoid outdated data in the future
recent = log[log['timestamp'] > log['timestamp'].max() - pd.Timedelta('30 days')]
recent.head()
"""
Explanation: We filter out older changes.
End of explanation
"""
java = recent[recent['filename'].str.endswith(".java")].copy()
java.head()
"""
Explanation: We keep just code written in Java.
End of explanation
"""
changes = java.groupby('filename')[['sha']].count()
changes.head()
"""
Explanation: III. Formal Modeling
Create new perspective on the data
Join data with other datasets
We aggregate the rows by counting the number of changes per file.
End of explanation
"""
loc = pd.read_csv("../datasets/cloc_intellij.csv.gz", index_col=1)
loc.head()
"""
Explanation: We add additional information about the number of lines of all currently existing files...
End of explanation
"""
hotspots = changes.join(loc[['code']]).dropna(subset=['code'])
hotspots.head()
"""
Explanation: ...and join this data with the existing dataset.
End of explanation
"""
top10 = hotspots.sort_values(by="sha", ascending=False).head(10)
top10
"""
Explanation: VI. Interpretation
Work out the essence of the analysis
Make the central message / new insight clear
We show only the TOP 10 hotspots in the code.
End of explanation
"""
ax = top10.plot.scatter('sha', 'code');
for k, v in top10.iterrows():
ax.annotate(k.split("/")[-1], v)
"""
Explanation: V. Communication
Transform insights into a comprehensible visualization
Communicate the next steps after the analysis
We plot the TOP 10 list as XY diagram.
End of explanation
"""
Repository: tcstewar/testing_notebooks | Path: Working memory overshoot.ipynb | License: gpl-2.0
# imports assumed from an earlier cell (nengo 2.x API, where the spa module is built in)
import numpy as np
import pylab
import nengo
from nengo import spa

dimensions = 10
input_scale = 1
n_neurons_per_dim = 50
intercept_low = -0.5
intercept_high = 1.0
tau_input = 0.01
tau_recurrent = 0.1
tau_reset = 0.2
max_rate_high = 200
max_rate_low = 150
sensory_delay = 0.05
reset_scale = 0.3
model = nengo.Network()
with model:
vocab = spa.Vocabulary(dimensions)
value = vocab.parse('A').v
def stim(t):
if 0.5 < t - sensory_delay < 0.75:
return value
else:
return [0]*dimensions
stim = nengo.Node(stim)
a = nengo.Ensemble(n_neurons=n_neurons_per_dim * dimensions,
dimensions=dimensions,
max_rates=nengo.dists.Uniform(max_rate_low, max_rate_high),
intercepts=nengo.dists.Uniform(intercept_low, intercept_high))
b = nengo.Ensemble(n_neurons=n_neurons_per_dim * dimensions,
dimensions=dimensions,
max_rates=nengo.dists.Uniform(max_rate_low, max_rate_high),
intercepts=nengo.dists.Uniform(intercept_low, intercept_high))
nengo.Connection(stim, a, synapse=None)
nengo.Connection(a, b, synapse=tau_input, transform=input_scale)
nengo.Connection(b, b, synapse=tau_recurrent)
def reset(t):
if t - sensory_delay > 1.75:
return 1
else:
return 0
reset_stim = nengo.Node(reset)
reset_value = vocab.parse('B').v
reset_value.shape = dimensions, 1
nengo.Connection(reset_stim, b.neurons, transform=np.ones((b.n_neurons, 1))*-reset_scale, synapse=tau_reset)
#nengo.Connection(reset_stim, b, transform=reset_value*reset_scale, synapse=tau_reset)
p_value = nengo.Probe(b, synapse=0.01)
p_neurons = nengo.Probe(b.neurons)
sim = nengo.Simulator(model)
sim.run(2.5)
"""
Explanation: The goal here is to make a model where you put something into working memory, then leave it there for a while, and then clear the working memory. When people do this in real brains, they get this sort of effect
<img src=http://jn.physiology.org/content/jn/91/3/1424/F12.medium.gif/>
(Source: https://jn.physiology.org/content/91/3/1424 )
Let's try to model this as a high-dimensional integrator, an input, and some inhibition to shut it off at the end.
End of explanation
"""
rates = sim.data[p_neurons]
ratesf = nengo.synapses.Lowpass(0.05).filt(rates)
pylab.plot(sim.trange(), np.mean(ratesf, axis=1))
pylab.axvline(0.5)
pylab.axvline(0.75)
pylab.axvline(1.75)
pylab.show()
"""
Explanation: Here is the plot of the average firing rate across all the neurons
End of explanation
"""
encs = sim.data[b].encoders
similarity = np.dot(encs, value)
items = np.where(similarity>0.0) # TODO: What is this threshold in the real data?
print('N= %d' % len(items[0]))
pylab.plot(sim.trange(), np.mean(ratesf[:,items[0]], axis=1))
pylab.axvline(0.5)
pylab.axvline(0.75)
pylab.axvline(1.75)
pylab.show()
"""
Explanation: But that's across all the neurons. In the empirical data we're comparing to, it looks like they only selected neurons that are preferentially active for that stimulus. This corresponds to the dot product between the encoder and the actual value being greater than zero.
However, the threshold probably shouldn't be exactly zero, as that would include a whole bunch of neurons that are only just barely preferentially active for the stimulus, and which probably wouldn't even be statistically determined to be preferentially active given the recording time. So we probably need to use some sort of higher threshold. But I don't know what it should be.
End of explanation
"""
Repository: empet/PSCourse | Path: Histograme.ipynb | License: bsd-3-clause
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
u=np.random.random()
print u
v=np.random.random(5)
print v
A=np.random.random((2,3))
print A
"""
Explanation: Generators for Numpy's probability distributions. Histograms
The numpy.random library contains functions that implement algorithms for simulating various discrete or continuous probability distributions.
Pseudo-random numbers uniformly distributed on $[0,1)$ are generated by the function
numpy.random.random(), which is based on the Mersenne-Twister generator.
The function's definition is:
numpy.random.random(size=None)
where size is the keyword that indicates whether a single number $u\in[0,1)$ is returned or an array whose dimensions are given as a tuple.
The generator's seed can be set with an integer (see also the C code of the Mersenne-Twister generator) like this:
numpy.random.seed(number).
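For example (illustrative values only), fixing the seed makes runs reproducible:

```python
import numpy as np

np.random.seed(2019)
print(np.random.random(3))
np.random.seed(2019)
print(np.random.random(3))  # the same three numbers are generated again
```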
End of explanation
"""
x=np.random.random(2000)
histo=plt.hist(x, bins=15, normed=True, color='g')
plt.plot([0,1], [1,1], 'r')# graficul densitatii uniforme pe [0,1)
"""
Explanation: The seed is set during the debugging phase of the code, because with a fixed seed every run generates the same sequence of numbers.
If it is not set explicitly, Numpy picks a random seed based on the time at which the code is launched.
To illustrate that a function generates observations of a random variable with a prescribed distribution, we build the histogram of the generated data and, for comparison,
draw the graph of the density of that distribution over the histogram.
For example, let us generate 2000 observations of a r.v. $U\sim Unif[0,1)$ and then draw
the histogram of the data with a prescribed number of bars.
The function that generates the histogram is:
matplotlib.pyplot.hist(), which we call simply as plt.hist.
The usual arguments and keywords of the function are:
- x is the array containing the data
- bins=10 the number of bars; 10 by default
- range=None, range gives the limits of the values in x between which the histogram is built. If
it is not set, the histogram is built for the data contained in the interval [xmin, xmax].
- normed=False (default) or True;
If normed=True, then the bar corresponding to each bin of the data has area equal to (see the algorithm presented in Lecture 10):
$\displaystyle\frac{\text{number of values in the bin}}{\text{total number of values}}$
If normed=False, then the bar has area equal to the width of its base times the number of values belonging to that bin.
color=None; this keyword sets the color of the bars in the histogram.
End of explanation
"""
def fexpo(theta, x):
return np.exp(-x/theta)/theta
"""
Explanation: The function np.random.exponential(theta, size=None) returns observations of an exponentially distributed random variable with parameter $\theta=$ theta.
The function that evaluates the density of the $Exp(\theta)$ distribution:
End of explanation
"""
theta=2.7
N=2000# nr de valori generate
x=np.random.exponential(theta, N)
histo=plt.hist(x, bins=20, normed=True, color='g', alpha=0.6)
xmin=np.min(x)
xmax=np.max(x)
M=100
X=np.linspace(xmin, xmax, M)
Y=fexpo(theta, X)
plt.plot(X,Y, 'r')#traseaza graficul densitatii exponentiale
"""
Explanation: We generate N observations of the $Exp(\theta)$ distribution, display the histogram of the generated
values, and draw the graph of the density for comparison:
End of explanation
"""
val = np.loadtxt("Vizite.txt")
histog=plt.hist(val,bins=100, normed=True, color='r')
"""
Explanation: The function np.random.exponential implements the algorithm for simulating the Exp distribution by the inversion
method (Lecture 12).
In the C/C++ project, the generated values will be saved to a text file, which is then read
in Python in order to display the histogram or other graphical objects.
Here is an example of reading a text file saved in the current directory and then building the histogram of the data
that was read.
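As an illustration of the inversion method (a sketch, not the library's internal code): if $U\sim Unif[0,1)$, then $X=-\theta\,\ln(1-U)$ has the $Exp(\theta)$ distribution, because the CDF of $Exp(\theta)$ is $F(x)=1-e^{-x/\theta}$.

```python
import numpy as np
import matplotlib.pyplot as plt

theta = 2.7
U = np.random.random(2000)
X_inv = -theta * np.log(1 - U)   # inversion of the Exp(theta) CDF
plt.hist(X_inv, bins=20, normed=True, color='b', alpha=0.6)
```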
End of explanation
"""
from IPython.core.display import HTML
def css_styling():
styles = open("./custom.css", "r").read()
return HTML(styles)
css_styling()
"""
Explanation: The file Vizite.txt contains the number of visits to the sites monitored by SATI during March.
The histogram shows that a large proportion of the monitored sites are visited only a small number of times, while just a few are visited very heavily.
End of explanation
"""
Repository: Rodolfobm/DLND-First-Project_First_Neural_Network | Path: dlnd-your-first-neural-network.ipynb | License: gpl-3.0
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1/(1+np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
hidden_grad = hidden_outputs*(1-hidden_outputs)
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T)# update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * np.dot((hidden_grad*hidden_errors), inputs.T) # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)# signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression; the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network, calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
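For reference, here is a sketch of the update rules that the skeleton above ends up implementing, written with $\sigma$ for the sigmoid, $h=\sigma(W_{ih}x)$ for the hidden activations, $\eta$ for the learning rate and $\odot$ for element-wise multiplication (this notation is mine, not part of the project template). Because the output activation is $f(x)=x$, its derivative is 1 and no extra factor appears in the output error term:
\begin{align}
\delta_o &= y - \hat{y} \\
\delta_h &= \left(W_{ho}^{T}\,\delta_o\right) \odot h \odot (1 - h) \\
\Delta W_{ho} &= \eta \, \delta_o \, h^{T} \\
\Delta W_{ih} &= \eta \, \delta_h \, x^{T}
\end{align}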
End of explanation
"""
import sys
### Set the hyperparameters here ###
epochs = 3000
learning_rate = 0.01
hidden_nodes = 15
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
ANSWER:
The model can predict really well except for abnormal days, such as the holiday season between Dec 22 and New Year. That probably happens because people get together or travel at that time, staying mostly inside their houses.
Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
End of explanation
"""
Repository: yl565/statsmodels | Path: examples/notebooks/statespace_varmax.ipynb | License: bsd-3-clause
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
dta = sm.datasets.webuse('lutkepohl2', 'http://www.stata-press.com/data/r12/')
dta.index = dta.qtr
endog = dta.ix['1960-04-01':'1978-10-01', ['dln_inv', 'dln_inc', 'dln_consump']]
"""
Explanation: VARMAX models
This is a brief introduction notebook to VARMAX models in Statsmodels. The VARMAX model is generically specified as:
$$
y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t +
M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q}
$$
where $y_t$ is a $k_{endog} \times 1$ vector.
End of explanation
"""
exog = endog['dln_consump']
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(2,0), trend='nc', exog=exog)
res = mod.fit(maxiter=1000)
print(res.summary())
"""
Explanation: Model specification
The VARMAX class in Statsmodels allows estimation of VAR, VMA, and VARMA models (through the order argument), optionally with a constant term (via the trend argument). Exogenous regressors may also be included (as usual in Statsmodels, by the exog argument), and in this way a time trend may be added. Finally, the class allows measurement error (via the measurement_error argument) and allows specifying either a diagonal or unstructured innovation covariance matrix (via the error_cov_type argument).
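For instance, measurement error and a diagonal innovation covariance could be requested as follows (a sketch of the constructor call only; this particular specification is not estimated in this notebook):

```python
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(1, 0),
                    measurement_error=True, error_cov_type='diagonal')
```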
Example 1: VAR
Below is a simple VARX(2) model in two endogenous variables and an exogenous series, but no constant term. Notice that we needed to allow for more iterations than the default (which is maxiter=50) in order for the likelihood estimation to converge. This is not unusual in VAR models which have to estimate a large number of parameters, often on a relatively small number of time series: this model, for example, estimates 27 parameters off of 75 observations of 3 variables.
End of explanation
"""
ax = res.impulse_responses(10, orthogonalized=True).plot(figsize=(13,3))
ax.set(xlabel='t', title='Responses to a shock to `dln_inv`');
"""
Explanation: From the estimated VAR model, we can plot the impulse response functions of the endogenous variables.
End of explanation
"""
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(0,2), error_cov_type='diagonal')
res = mod.fit(maxiter=1000)
print(res.summary())
"""
Explanation: Example 2: VMA
A vector moving average model can also be formulated. Below we show a VMA(2) on the same data, but where the innovations to the process are uncorrelated. In this example we leave out the exogenous regressor but now include the constant term.
End of explanation
"""
mod = sm.tsa.VARMAX(endog[['dln_inv', 'dln_inc']], order=(1,1))
res = mod.fit(maxiter=1000)
print(res.summary())
"""
Explanation: Caution: VARMA(p,q) specifications
Although the model allows estimating VARMA(p,q) specifications, these models are not identified without additional restrictions on the representation matrices, which are not built-in. For this reason, it is recommended that the user proceed with caution (and indeed a warning is issued when these models are specified). Nonetheless, they may in some circumstances provide useful information.
End of explanation
"""
Repository: jmschrei/pomegranate | Path: tutorials/old/Tutorial_5_Bayes_Classifiers.ipynb | License: mit
X = numpy.concatenate((numpy.random.normal(3, 1, 200), numpy.random.normal(10, 2, 1000)))
y = numpy.concatenate((numpy.zeros(200), numpy.ones(1000)))
x1 = X[:200]
x2 = X[200:]
plt.figure(figsize=(16, 5))
plt.hist(x1, bins=25, color='m', edgecolor='m', label="Class A")
plt.hist(x2, bins=25, color='c', edgecolor='c', label="Class B")
plt.xlabel("Value", fontsize=14)
plt.ylabel("Count", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: Naive Bayes and Bayes Classifiers: A Tutorial
author: Jacob Schreiber <br>
contact: jmschreiber91@gmail.com
Bayes classifiers are some of the simplest machine learning models that exist, due to their intuitive probabilistic interpretation and simple fitting step. Each class is modeled as a probability distribution, and the data is interpreted as samples drawn from these underlying distributions. Fitting the model to data is as simple as calculating maximum likelihood parameters for the data that falls under each class, and making predictions is as simple as using Bayes' rule to determine which class is most likely given the distributions. Bayes' Rule is the following:
\begin{equation}
P(M|D) = \frac{P(D|M)P(M)}{P(D)}
\end{equation}
where M stands for the model and D stands for the data. $P(M)$ is known as the <i>prior</i>, because it is the probability that a sample is of a certain class before you even know what the sample is. This is generally just the frequency of each class. Intuitively, it makes sense that you would want to model this, because if one class occurs 10x more than another class, it is more likely that a given sample will belong to that distribution. $P(D|M)$ is the likelihood, or the probability, of the data under a given model. Lastly, $P(M|D)$ is the posterior, which is the probability of each component of the model, or class, being the component which generated the data. It is called the posterior because the prior corresponds to probabilities before seeing data, and the posterior corresponds to probabilities after observing the data. In cases where the prior is uniform, the posterior is just equal to the normalized likelihoods. This equation forms the basis of most probabilistic modeling, with interesting priors allowing the user to inject sophisticated expert knowledge into the problem directly.
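As a tiny numeric illustration (priors and likelihoods made up purely for this example), the posterior is just the normalized product of the prior and the likelihood:

```python
import numpy

prior = numpy.array([0.2, 0.8])        # P(M) for two classes
likelihood = numpy.array([0.5, 0.1])   # P(D|M) for one observed sample
unnormalized = prior * likelihood      # P(M)P(D|M)
posterior = unnormalized / unnormalized.sum()   # divide by P(D)
print(posterior)                       # [0.5555... 0.4444...]
```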
Let's take a look at some single dimensional data in order to introduce these concepts more thoroughly.
End of explanation
"""
d1 = NormalDistribution.from_samples(x1)
d2 = NormalDistribution.from_samples(x2)
idxs = numpy.arange(0, 15, 0.1)
p1 = map(d1.probability, idxs)
p2 = map(d2.probability, idxs)
plt.figure(figsize=(16, 5))
plt.plot(idxs, p1, color='m'); plt.fill_between(idxs, 0, p1, facecolor='m', alpha=0.2)
plt.plot(idxs, p2, color='c'); plt.fill_between(idxs, 0, p2, facecolor='c', alpha=0.2)
plt.xlabel("Value", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: The data seems like it comes from two normal distributions, with the cyan class being more prevalent than the magenta class. A natural way to model this data would be to create a normal distribution for the cyan data, and another for the magenta distribution.
Let's take a look at doing that. All we need to do is use the from_samples class method of the NormalDistribution class.
End of explanation
"""
magenta_prior = 1. * len(x1) / len(X)
cyan_prior = 1. * len(x2) / len(X)
plt.figure(figsize=(4, 6))
plt.title("Prior Probabilities P(M)", fontsize=14)
plt.bar(0, magenta_prior, facecolor='m', edgecolor='m')
plt.bar(1, cyan_prior, facecolor='c', edgecolor='c')
plt.xticks([0, 1], ['P(Magenta)', 'P(Cyan)'], fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: It looks like some aspects of the data are captured well by doing things this way-- specifically the mean and variance of the normal distributions. This allows us to easily calculate $P(D|M)$ as the probability of a sample under either the cyan or magenta distributions using the normal (or Gaussian) probability density equation:
\begin{align}
P(D|M) &= P(x|\mu, \sigma) \
&= \frac{1}{\sqrt{2\pi\sigma^{2}}} exp \left(-\frac{(x-u)^{2}}{2\sigma^{2}} \right)
\end{align}
However, if we look at the original data, we see that the cyan distribution is both much wider than the purple distribution and much taller, as there were more samples from that class in general. If we reduce that data down to these two distributions, we lose the class imbalance. We want our prior to model this class imbalance, with the reasoning being that if we randomly draw a sample from the samples observed thus far, it is far more likely to be a cyan than a magenta sample. Let's take a look at this class imbalance exactly.
End of explanation
"""
d1 = NormalDistribution.from_samples(x1)
d2 = NormalDistribution.from_samples(x2)
idxs = numpy.arange(0, 15, 0.1)
p_magenta = numpy.array(map(d1.probability, idxs)) * magenta_prior
p_cyan = numpy.array(map(d2.probability, idxs)) * cyan_prior
plt.figure(figsize=(16, 5))
plt.plot(idxs, p_magenta, color='m'); plt.fill_between(idxs, 0, p_magenta, facecolor='m', alpha=0.2)
plt.plot(idxs, p_cyan, color='c'); plt.fill_between(idxs, 0, p_cyan, facecolor='c', alpha=0.2)
plt.xlabel("Value", fontsize=14)
plt.ylabel("P(M)P(D|M)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: The prior $P(M)$ is a vector of probabilities over the classes that the model can predict, also known as components. In this case, if we draw a sample randomly from the data that we have, there is a ~83% chance that it will come from the cyan class and a ~17% chance that it will come from the magenta class.
Let's multiply the probability densities we got before by this imbalance.
End of explanation
"""
magenta_posterior = p_magenta / (p_magenta + p_cyan)
cyan_posterior = p_cyan / (p_magenta + p_cyan)
plt.figure(figsize=(16, 5))
plt.subplot(211)
plt.plot(idxs, p_magenta, color='m'); plt.fill_between(idxs, 0, p_magenta, facecolor='m', alpha=0.2)
plt.plot(idxs, p_cyan, color='c'); plt.fill_between(idxs, 0, p_cyan, facecolor='c', alpha=0.2)
plt.xlabel("Value", fontsize=14)
plt.ylabel("P(M)P(D|M)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.subplot(212)
plt.plot(idxs, magenta_posterior, color='m')
plt.plot(idxs, cyan_posterior, color='c')
plt.xlabel("Value", fontsize=14)
plt.ylabel("P(M|D)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: This looks a lot more faithful to the original data, and actually corresponds to $P(M)P(D|M)$, the prior multiplied by the likelihood. However, these aren't actually probability distributions anymore, as they no longer integrate to 1. This is why the $P(M)P(D|M)$ term has to be normalized by the $P(D)$ term in Bayes' rule in order to get a probability distribution over the components. However, $P(D)$ is difficult to determine exactly-- what is the probability of the data? Well, we can sum over the classes to get that value, since $P(D) = \sum_{i=1}^{c} P(D|M)P(M)$ for a problem with c classes. This translates into $P(D) = P(M=Cyan)P(D|M=Cyan) + P(M=Magenta)P(D|M=Magenta)$ for this specific problem, and those values can just be pulled from the unnormalized plots above.
This gives us the full Bayes' rule, with the posterior $P(M|D)$ being the proportion of density of the above plot coming from each of the two distributions at any point on the line. Let's take a look at the posterior probabilities of the two classes on the same line.
End of explanation
"""
idxs = idxs.reshape(idxs.shape[0], 1)
X = X.reshape(X.shape[0], 1)
model = NaiveBayes.from_samples(NormalDistribution, X, y)
posteriors = model.predict_proba(idxs)
plt.figure(figsize=(14, 4))
plt.plot(idxs, posteriors[:,0], color='m')
plt.plot(idxs, posteriors[:,1], color='c')
plt.xlabel("Value", fontsize=14)
plt.ylabel("P(M|D)", fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: The top plot shows the same densities as before, while the bottom plot shows the proportion of the density belonging to either class at that point. This proportion is known as the posterior $P(M|D)$, and can be interpreted as the probability of that point belonging to each class. This is one of the native benefits of probabilistic models, that instead of providing a hard class label for each sample, they can provide a soft label in the form of the probability of belonging to each class.
We can implement all of this simply in pomegranate using the NaiveBayes class.
End of explanation
"""
X = numpy.concatenate([numpy.random.normal(3, 2, size=(150, 2)), numpy.random.normal(7, 1, size=(250, 2))])
y = numpy.concatenate([numpy.zeros(150), numpy.ones(250)])
plt.figure(figsize=(8, 8))
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c')
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m')
plt.xlim(-2, 10)
plt.ylim(-4, 12)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: Looks like we're getting the same plots for the posteriors just through fitting the naive Bayes model directly to data. The predictions made will come directly from the posteriors in this plot, with cyan predictions happening whenever the cyan posterior is greater than the magenta posterior, and vice-versa.
Naive Bayes
In the univariate setting, naive Bayes is identical to a general Bayes classifier. The divergence occurs in the multivariate setting, the naive Bayes model assumes independence of all features, while a Bayes classifier is more general and can support more complicated interactions or covariances between features. Let's take a look at what this means in terms of Bayes' rule.
\begin{align}
P(M|D) &= \frac{P(M)P(D|M)}{P(D)} \
&= \frac{P(M)\prod_{i=1}^{d}P(D_{i}|M_{i})}{P(D)}
\end{align}
This looks fairly simple to compute, as we just need to pass each dimension into the appropriate distribution and then multiply the returned probabilities together. This simplicity is one of the reasons why naive Bayes is so widely used. Let's look closer at using this in pomegranate, starting off by generating two blobs of data that overlap a bit and inspecting them.
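To make the product concrete, here is a small hand-rolled version of that numerator (the distributions, priors, and sample are toy values chosen purely for illustration):

```python
from pomegranate import NormalDistribution

# Per-dimension class-conditional models, P(D_i | M_i)
class_a = [NormalDistribution(3, 2), NormalDistribution(3, 2)]
class_b = [NormalDistribution(7, 1), NormalDistribution(7, 1)]
priors = [0.4, 0.6]   # P(M)

sample = [5.0, 6.0]
p_a = priors[0] * class_a[0].probability(sample[0]) * class_a[1].probability(sample[1])
p_b = priors[1] * class_b[0].probability(sample[0]) * class_b[1].probability(sample[1])

print(p_a / (p_a + p_b))   # posterior P(M=a|D)
print(p_b / (p_a + p_b))   # posterior P(M=b|D)
```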
End of explanation
"""
from sklearn.naive_bayes import GaussianNB
model = NaiveBayes.from_samples(NormalDistribution, X, y)
clf = GaussianNB().fit(X, y)
xx, yy = np.meshgrid(np.arange(-2, 10, 0.02), np.arange(-4, 12, 0.02))
Z1 = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
Z2 = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.title("pomegranate naive Bayes", fontsize=16)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c')
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m')
plt.contour(xx, yy, Z1)
plt.xlim(-2, 10)
plt.ylim(-4, 12)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.subplot(122)
plt.title("sklearn naive Bayes", fontsize=16)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c')
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m')
plt.contour(xx, yy, Z2)
plt.xlim(-2, 10)
plt.ylim(-4, 12)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
"""
Explanation: Now, let's fit our naive Bayes model to this data using pomegranate. We can use the from_samples class method, pass in the distribution that we want to model each dimension, and then the data. We choose to use NormalDistribution in this particular case, but any supported distribution would work equally well, such as BernoulliDistribution or ExponentialDistribution. To ensure we get the correct decision boundary, let's also plot the boundary recovered by sklearn.
End of explanation
"""
def plot_signal(X, n):
plt.figure(figsize=(16, 6))
t_current = 0
for i in range(n):
mu, std, t = X[i]
chunk = numpy.random.normal(mu, std, int(t))
plt.plot(numpy.arange(t_current, t_current+t), chunk, c='cm'[i % 2])
t_current += t
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("Time (s)", fontsize=14)
plt.ylabel("Signal", fontsize=14)
plt.ylim(20, 40)
plt.show()
def create_signal(n):
X, y = [], []
for i in range(n):
mu = numpy.random.normal(30.0, 0.4)
std = numpy.random.lognormal(-0.1, 0.4)
t = int(numpy.random.exponential(50)) + 1
X.append([mu, std, int(t)])
y.append(0)
mu = numpy.random.normal(30.5, 0.8)
std = numpy.random.lognormal(-0.3, 0.6)
t = int(numpy.random.exponential(200)) + 1
X.append([mu, std, int(t)])
y.append(1)
return numpy.array(X), numpy.array(y)
X_train, y_train = create_signal(1000)
X_test, y_test = create_signal(250)
plot_signal(X_train, 20)
"""
Explanation: Drawing the decision boundary helps to verify that we've produced a good result by cleanly splitting the two blobs from each other.
Bayes' rule provides a great deal of flexibility in terms of what the actually likelihood functions are. For example, when considering a multivariate distribution, there is no need for each dimension to be modeled by the same distribution. In fact, each dimension can be modeled by a different distribution, as long as we can multiply the $P(D|M)$ terms together.
Let's consider the example of some noisy signals that have been segmented. We know that they come from two underlying phenomena, the cyan phenomena and the magenta phenomena, and want to classify future segments. To do this, we have three features-- the mean signal of the segment, the standard deviation, and the duration.
End of explanation
"""
model = NaiveBayes.from_samples(NormalDistribution, X_train, y_train)
print "Gaussian Naive Bayes: ", (model.predict(X_test) == y_test).mean()
clf = GaussianNB().fit(X_train, y_train)
print "sklearn Gaussian Naive Bayes: ", (clf.predict(X_test) == y_test).mean()
"""
Explanation: We can start by modeling each variable as Gaussians, like before, and see what accuracy we get.
End of explanation
"""
plt.figure(figsize=(14, 4))
plt.subplot(131)
plt.title("Mean")
plt.hist(X_train[y_train == 0, 0], color='c', alpha=0.5, bins=25)
plt.hist(X_train[y_train == 1, 0], color='m', alpha=0.5, bins=25)
plt.subplot(132)
plt.title("Standard Deviation")
plt.hist(X_train[y_train == 0, 1], color='c', alpha=0.5, bins=25)
plt.hist(X_train[y_train == 1, 1], color='m', alpha=0.5, bins=25)
plt.subplot(133)
plt.title("Duration")
plt.hist(X_train[y_train == 0, 2], color='c', alpha=0.5, bins=25)
plt.hist(X_train[y_train == 1, 2], color='m', alpha=0.5, bins=25)
plt.show()
"""
Explanation: We get identical values for sklearn and for pomegranate, which is good. However, let's take a look at the data itself to see whether a Gaussian distribution is the appropriate distribution for the data.
End of explanation
"""
model = NaiveBayes.from_samples(NormalDistribution, X_train, y_train)
print "Gaussian Naive Bayes: ", (model.predict(X_test) == y_test).mean()
clf = GaussianNB().fit(X_train, y_train)
print "sklearn Gaussian Naive Bayes: ", (clf.predict(X_test) == y_test).mean()
model = NaiveBayes.from_samples([NormalDistribution, LogNormalDistribution, ExponentialDistribution], X_train, y_train)
print "Heterogeneous Naive Bayes: ", (model.predict(X_test) == y_test).mean()
"""
Explanation: So, unsurprisingly (since you can see that I used non-Gaussian distributions to generate the data originally), it looks like only the mean follows a normal distribution, whereas the standard deviation seems to follow either a gamma or a log-normal distribution. We can take advantage of that by explicitly using these distributions instead of approximating them as normal distributions. pomegranate is flexible enough to allow for this, whereas sklearn currently is not.
End of explanation
"""
%timeit GaussianNB().fit(X_train, y_train)
%timeit NaiveBayes.from_samples(NormalDistribution, X_train, y_train)
%timeit NaiveBayes.from_samples([NormalDistribution, LogNormalDistribution, ExponentialDistribution], X_train, y_train)
"""
Explanation: It looks like we're able to get a small improvement in accuracy just by using appropriate distributions for the features, without any type of data transformation or filtering. This certainly seems worthwhile if you can determine what the appropriate underlying distribution is.
Next, there's obviously the issue of speed. Let's compare the speed of the pomegranate implementation and the sklearn implementation.
End of explanation
"""
pom_time, skl_time = [], []
n1, n2 = 15000, 60000,
for d in range(1, 101, 5):
X = numpy.concatenate([numpy.random.normal(3, 2, size=(n1, d)), numpy.random.normal(7, 1, size=(n2, d))])
y = numpy.concatenate([numpy.zeros(n1), numpy.ones(n2)])
tic = time.time()
for i in range(25):
GaussianNB().fit(X, y)
skl_time.append((time.time() - tic) / 25)
tic = time.time()
for i in range(25):
NaiveBayes.from_samples(NormalDistribution, X, y)
pom_time.append((time.time() - tic) / 25)
plt.figure(figsize=(14, 6))
plt.plot(range(1, 101, 5), pom_time, color='c', label="pomegranate")
plt.plot(range(1, 101, 5), skl_time, color='m', label="sklearn")
plt.xticks(fontsize=14)
plt.xlabel("Number of Dimensions", fontsize=14)
plt.yticks(fontsize=14)
plt.ylabel("Time (s)")
plt.legend(fontsize=14)
plt.show()
"""
Explanation: Looks as if on this small dataset they're all taking approximately the same time. This is pretty much expected, as the fitting step is fairly simple and both implementations use C-level numerics for the calculations. We can give a more thorough treatment of the speed comparison on larger datasets. Let's look at the average time it takes to fit a model to data of increasing dimensionality across 25 runs.
End of explanation
"""
tilt_a = [[-2, 0.5], [5, 2]]
tilt_b = [[-1, 1.5], [3, 3]]
X = numpy.concatenate((numpy.random.normal(4, 1, size=(250, 2)).dot(tilt_a), numpy.random.normal(3, 1, size=(800, 2)).dot(tilt_b)))
y = numpy.concatenate((numpy.zeros(250), numpy.ones(800)))
model_a = NaiveBayes.from_samples(NormalDistribution, X, y)
model_b = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
xx, yy = np.meshgrid(np.arange(-5, 30, 0.02), np.arange(0, 25, 0.02))
Z1 = model_a.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
Z2 = model_b.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.figure(figsize=(18, 8))
plt.subplot(121)
plt.contour(xx, yy, Z1)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c', alpha=0.3)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m', alpha=0.3)
plt.xlim(-5, 30)
plt.ylim(0, 25)
plt.subplot(122)
plt.contour(xx, yy, Z2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c', alpha=0.3)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m', alpha=0.3)
plt.xlim(-5, 30)
plt.ylim(0, 25)
plt.show()
"""
Explanation: It appears as if the two implementations are basically the same speed. This is unsurprising given the simplicity of the calculations, and as mentioned before, the low level implementation.
Bayes Classifiers
The natural generalization of the naive Bayes classifier is to allow any multivariate function to take the place of $P(D|M)$, instead of it being the product of several univariate probability distributions. One immediate difference is that instead of creating a Gaussian model with effectively a diagonal covariance matrix, you can now create one with a full covariance matrix. Let's see an example of that at work.
End of explanation
"""
print "naive training accuracy: {:4.4}".format((model_a.predict(X) == y).mean())
print "bayes classifier training accuracy: {:4.4}".format((model_b.predict(X) == y).mean())
"""
Explanation: It looks like we are able to get a better boundary between the two blobs of data. The primary reason for this is that the data don't form spherical clusters, as is assumed when you force a diagonal covariance matrix, but tilted ellipsoids, which can be better modeled by a full covariance matrix. We can quantify this quickly by looking at performance on the training data.
End of explanation
"""
X = numpy.empty(shape=(0, 2))
X = numpy.concatenate((X, numpy.random.normal(4, 1, size=(200, 2)).dot([[-2, 0.5], [2, 0.5]])))
X = numpy.concatenate((X, numpy.random.normal(3, 1, size=(350, 2)).dot([[-1, 2], [1, 0.8]])))
X = numpy.concatenate((X, numpy.random.normal(7, 1, size=(700, 2)).dot([[-0.75, 0.8], [0.9, 1.5]])))
X = numpy.concatenate((X, numpy.random.normal(6, 1, size=(120, 2)).dot([[-1.5, 1.2], [0.6, 1.2]])))
y = numpy.concatenate((numpy.zeros(550), numpy.ones(820)))
model_a = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
gmm_a = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, 2, X[y == 0])
gmm_b = GeneralMixtureModel.from_samples(MultivariateGaussianDistribution, 2, X[y == 1])
model_b = BayesClassifier([gmm_a, gmm_b], weights=numpy.array([1-y.mean(), y.mean()]))
xx, yy = np.meshgrid(np.arange(-10, 10, 0.02), np.arange(0, 25, 0.02))
Z1 = model_a.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
Z2 = model_b.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
centroids1 = numpy.array([distribution.mu for distribution in model_a.distributions])
centroids2 = numpy.concatenate([[distribution.mu for distribution in component.distributions] for component in model_b.distributions])
plt.figure(figsize=(18, 8))
plt.subplot(121)
plt.contour(xx, yy, Z1)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c', alpha=0.3)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m', alpha=0.3)
plt.scatter(centroids1[:,0], centroids1[:,1], color='k', s=100)
plt.subplot(122)
plt.contour(xx, yy, Z2)
plt.scatter(X[y == 0, 0], X[y == 0, 1], color='c', alpha=0.3)
plt.scatter(X[y == 1, 0], X[y == 1, 1], color='m', alpha=0.3)
plt.scatter(centroids2[:,0], centroids2[:,1], color='k', s=100)
plt.show()
"""
Explanation: Looks like there is a significant boost. Naturally you'd want to evaluate the performance of the model on separate validation data, but for the purposes of demonstrating the effect of a full covariance matrix this should be sufficient.
While using a full covariance matrix is certainly more complicated than using only the diagonal, there is no reason that the $P(D|M)$ has to even be a single simple distribution versus a full probabilistic model. After all, all probabilistic models, including general mixtures, hidden Markov models, and Bayesian networks, can calculate $P(D|M)$. Let's take a look at an example of using a mixture model instead of a single gaussian distribution.
End of explanation
"""
|
mlhhu2017/identifyDigit
|
bekcic/gaussian_multivar.ipynb
|
mit
|
training, test = load_sorted_data('data_notMNIST')
PRIORS = {'id': lambda data: [np.identity(len(data[0][0])) for _ in range(len(data))],
'var': lambda data: variance(data),
'var_1': lambda data: variance(data, axis=0),
'cov': lambda data: covariance(data)}
means = mean(training)
sigma = {}
for key in PRIORS:
sigma[key] = PRIORS[key](training)
"""
Explanation: Basic setup
Getting the training and test data and calculating the means, the variances (with axis=None and axis=0) and the covariances.
End of explanation
"""
plot_all_numbers(means, elements_per_line=5, plot_title="Means of the training dataset")
plot_all_numbers(variances1, elements_per_line=5, plot_title="Variances of the training dataset")
"""
Explanation: Plotting
Here you can see the means and variances of the numbers in the training dataset.
End of explanation
"""
pdfs = {}
for key in sigma:
pdfs[key] = multivariates(training, sigma[key])
"""
Explanation: PDFs
Using the multivariate Gaussian (normal) distribution.
Here I try four different PDFs in total, swapping out the $\Sigma$ (prior):
First one is $\Sigma =$ id.
Second one is $\Sigma =$ variances.
Third one is $\Sigma =$ variances1
Fourth one is $\Sigma =$ covariances
End of explanation
"""
tmp = flatten_lists([test[i][:20] for i in range(10)])
tmp = [np.array(x) for x in tmp]
plot_all_numbers(tmp, elements_per_line=20, plot_title="First 20 of each number from the test dataset")
preds = {}
for key in pdfs:
preds[key] = [tell_all_numbers(pdfs[key], nums) for nums in test]
for i in range(10):
print("Right guess: {0}".format(i))
for key in preds:
print("{0}:\t{1}\tERRORS: {2}".format(key, preds[key][i][:20], len([x for x in preds[key][i][:20] if x != i])))
print("")
"""
Explanation: First test run
Now I plot the first 20 numbers of each test dataset (0-9), then predict the corresponding number with each of the four PDFs, and finally show the number of errors each PDF makes for each number.
(The second code snippet below may take a while, as it processes the entire test dataset.)
End of explanation
"""
class_names = [str(i) for i in range(10)]
confusion_matrix = {}
training_labels = flatten_lists([[i]*len(preds['id'][i]) for i in range(10)])
#plot_confusion_matrix(conf_matrix, classes=class_names)
for key in preds:
confusion_matrix[key] = conf_mat(flatten_lists(preds[key]), list(training_labels))
for key in confusion_matrix:
plot.figure(figsize=(10,10))
plot_confusion_matrix(normalize(confusion_matrix[key]), classes=class_names, title="{0} Confusion Matrix".format(key))
plot.show()
"""
Explanation: Visualizing the result
Now I am going to visualize the results with confusion matrices.
End of explanation
"""
|
wcmckee/ece-display
|
niktrans.ipynb
|
mit
|
import os
import json
os.system('python3 nikoladu.py')
os.chdir('/home/wcmckee/nik1/')
os.system('nikola build')
os.system('rsync -azP /home/wcmckee/nik1/* wcmckee@wcmckee.com:/home/wcmckee/github/wcmckee.com/output/minedujobs')
opccschho = open('/home/wcmckee/ccschool/cctru.json', 'r')
opcz = opccschho.read()
rssch = json.loads(opcz)
filrma = ('/home/wcmckee/ccschol/')
for rs in rssch.keys():
hythsc = (rs.replace(' ', '-'))
hylow = hythsc.lower()
hybrac = hylow.replace('(', '')
hybaec = hybrac.replace(')', '')
os.mkdir(filrma + hybaec)
os.system('nikola init -q ' + filrma + hybaec)
"""
Explanation: <h1>NikTrans</h1>
Python script to create Nikola sites from a list of schools. Edits conf.py file for site name and licence.
End of explanation
"""
lisschol = os.listdir('/home/wcmckee/ccschol/')
findwat = ('LICENSE = """')
def replacetext(findtext, replacetext):
    # Replace findtext in every school's conf.py (lisschol holds the
    # already-normalized folder names created above).
    for lisol in lisschol:
        filereaz = ('/home/wcmckee/ccschol/' + lisol + '/conf.py')
        f = open(filereaz,'r')
        filedata = f.read()
        f.close()
        newdata = filedata.replace(findtext, '"' + replacetext + '"')
        #print (newdata)
        f = open(filereaz,'w')
        f.write(newdata)
        f.close()
replacetext('LICENSE = """', 'LICENSE = """<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons Attribution 4.0 International License" style="border-width:0; margin-bottom:12px;" src="https://i.creativecommons.org/l/by/4.0/88x31.png"></a>"')
licfil = 'LICENSE = """<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons Attribution 4.0 International License" style="border-width:0; margin-bottom:12px;" src="https://i.creativecommons.org/l/by/4.0/88x31.png"></a>"'
opwcm = ('/home/wcmckee/github/wcm.com/conf.py')
for lisol in lisschol:
print (lisol)
rdwcm = open(opwcm, 'r')
filewcm = rdwcm.read()
newdata = filewcm.replace('wcmckee', lisol)
rdwcm.close()
#print (newdata)
f = open('/home/wcmckee/ccschol/' + lisol + '/conf.py','w')
f.write(newdata)
f.close()
rdwcm = open(opwcm, 'r')
for rdlin in rdwcm.readlines():
    #print (rdlin)
    if 'BLOG_TITLE' in rdlin:
        print (rdlin)
rdwcm.close()
for lisol in lisschol:
print (lisol)
hythsc = (lisol.replace(' ', '-'))
hylow = hythsc.lower()
hybrac = hylow.replace('(', '')
hybaec = hybrac.replace(')', '')
filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')
f = open(filereaz,'r')
filedata = f.read()
f.close()
newdata = filedata.replace('LICENCE = """', licfil )
#print (newdata)
f = open(filereaz,'w')
f.write(newdata)
f.close()
for lisol in lisschol:
print (lisol)
hythsc = (lisol.replace(' ', '-'))
hylow = hythsc.lower()
hybrac = hylow.replace('(', '')
hybaec = hybrac.replace(')', '')
filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')
f = open(filereaz,'r')
filedata = f.read()
f.close()
newdata = filedata.replace('"Demo Site"', '"' + hybaec + '"')
#print (newdata)
f = open(filereaz,'w')
f.write(newdata)
f.close()
"""
Explanation: I want to open each of the conf.py files and replace the name of the site with the lowercased school name (hythsc.lower).
The directory /home/wcmckee/ccschol has all the school folders. In each conf.py, the demo site name needs to be replaced
with the school's folder name.
Some school names are missing characters - e.g. ardmore.
End of explanation
"""
buildnik = input('Build school sites y/N ')
for lisol in lisschol:
print (lisol)
os.chdir('/home/wcmckee/ccschol/' + lisol)
if 'y' in buildnik:
os.system('nikola build')
makerst = open('/home/wcmckee/ccs')
for rs in rssch.keys():
hythsc = (rs.replace(' ', '-'))
hylow = hythsc.lower()
hybrac = hylow.replace('(', '-')
hybaec = hybrac.replace(')', '')
#print (hylow())
filereaz = ('/home/wcmckee/ccschol/' + hybaec + '/conf.py')
f = open(filereaz,'r')
filedata = f.read()
newdata = filedata.replace("Demo Site", hybaec)
f.close()
f = open(filereaz,'w')
f.write(newdata)
f.close()
"""
Explanation: Perform Nikola build of all the sites in ccschol folder
End of explanation
"""
|
ethen8181/machine-learning
|
recsys/ann_benchmarks/ann_benchmarks.ipynb
|
mit
|
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(css_style='custom2.css', plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import time
import nmslib # pip install nmslib>=1.7.3.2 pybind11>=2.2.3
import zipfile
import requests
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from joblib import dump, load
from sklearn.preprocessing import normalize
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
# change default style figure and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12
%watermark -a 'Ethen' -d -t -v -p numpy,sklearn,matplotlib,tqdm,nmslib
"""
Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Approximate-Nearest-Neighbor-Search" data-toc-modified-id="Approximate-Nearest-Neighbor-Search-1"><span class="toc-item-num">1 </span>Approximate Nearest Neighbor Search</a></span><ul class="toc-item"><li><span><a href="#Setting-Up-the-Data" data-toc-modified-id="Setting-Up-the-Data-1.1"><span class="toc-item-num">1.1 </span>Setting Up the Data</a></span></li><li><span><a href="#Benchmarking-ANN-Methods" data-toc-modified-id="Benchmarking-ANN-Methods-1.2"><span class="toc-item-num">1.2 </span>Benchmarking ANN Methods</a></span></li></ul></li><li><span><a href="#Reference" data-toc-modified-id="Reference-2"><span class="toc-item-num">2 </span>Reference</a></span></li></ul></div>
End of explanation
"""
def download(url, filename):
with open(filename, 'wb') as file:
response = requests.get(url)
file.write(response.content)
# we'll download the data to DATA_DIR location
DATA_DIR = './datasets/'
URL = 'http://nlp.stanford.edu/data/glove.twitter.27B.zip'
filename = os.path.join(DATA_DIR, 'glove.twitter.27B.zip')
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
if not os.path.exists(filename):
download(URL, filename)
def get_train_test_data(filename, dimension=25, test_size=0.2, random_state=1234):
"""
dimension : int, {25, 50, 100, 200}, default 25
The dataset contains embeddings of different size.
"""
with zipfile.ZipFile(filename) as f:
X = []
zip_filename = 'glove.twitter.27B.{}d.txt'.format(dimension)
for line in f.open(zip_filename):
# remove the first index, id field and only get the vectors
vector = np.array([float(x) for x in line.strip().split()[1:]])
X.append(vector)
X_train, X_test = train_test_split(
np.array(X), test_size=test_size, random_state=random_state)
# we can downsample for experimentation purpose
# X_train = X_train[:50000]
# X_test = X_test[:10000]
return X_train, X_test
X_train, X_test = get_train_test_data(filename)
print('training data shape: ', X_train.shape)
print('testing data shape: ', X_test.shape)
"""
Explanation: Approximate Nearest Neighbor Search
Approximate nearest neighbor (ANN) search is useful when we have a large dataset with hundreds of thousands, millions, or billions of data points, and for a given data point we wish to find its nearest neighbors. There are many use cases for this type of method, and the one we'll be focusing on here is finding similar vector representations, so think of algorithms such as matrix factorization or word2vec that compress our original data into embeddings, or so-called latent factors. Throughout the notebook, the notion of similarity refers to two vectors' cosine distance.
There are many open-source implementations that we can try to see whether they solve our problem, but the question is always: which one is better? The following GitHub repo contains a thorough benchmark of various open-source implementations. Github: Benchmarking nearest neighbors.
The goal of this notebook is to show how to run a quicker benchmark ourselves without all the complexity. The repo listed above benchmarks multiple algorithms on multiple datasets using multiple hyperparameters, which can take a really long time. We will pick one open-source implementation that has been identified as a solid choice and walk step by step through the process using one dataset.
Setting Up the Data
The first step is to get our hands on some data and split it into training and test sets. Here we'll be using the GloVe vector representations trained on the Twitter dataset.
End of explanation
"""
class BruteForce:
"""
Brute force way of computing cosine distance, this
is more of clarifying what we're trying to accomplish,
don't actually use it as it will take extremely long.
"""
def __init__(self):
pass
def fit(self, X):
lens = (X ** 2).sum(axis=-1)
index = X / np.sqrt(lens)[:, np.newaxis]
self.index_ = np.ascontiguousarray(index, dtype=np.float32)
return self
def query(self, vector, topn):
"""Find indices of most similar vectors for a given query vector."""
# argmax_a dot(a, b) / |a||b| = argmin_a -dot(a, b)
dists = -np.dot(self.index_, vector)
indices = np.argpartition(dists, topn)[:topn]
return sorted(indices, key=lambda index: dists[index])
class KDTree:
def __init__(self, topn=10, n_jobs=-1):
self.topn = topn
self.n_jobs = n_jobs
def fit(self, X):
# cosine distance is proportional to normalized euclidean distance,
# thus we normalize the item vectors and use euclidean metric so
# we can use the more efficient kd-tree for nearest neighbor search
X_normed = normalize(X)
index = NearestNeighbors(
n_neighbors=self.topn, metric='euclidean', n_jobs=self.n_jobs)
index.fit(X_normed)
self.index_ = index
return self
def query_batch(self, X):
X_normed = normalize(X)
_, indices = self.index_.kneighbors(X_normed)
return indices
def query(self, vector):
vector_normed = normalize(vector.reshape(1, -1))
_, indices = self.index_.kneighbors(vector_normed)
return indices.ravel()
def get_ground_truth(X_train, X_test, kdtree_params):
"""
Compute the ground truth or so called golden standard, during
which we'll compute the time to build the index using the
training set, time to query the nearest neighbors for all
the data points in the test set. The ground_truth returned
will be of type list[(ndarray, ndarray)], where the first
ndarray will be the query vector, and the second ndarray will
be the corresponding nearest neighbors.
"""
start = time.time()
kdtree = KDTree(**kdtree_params)
kdtree.fit(X_train)
build_time = time.time() - start
start = time.time()
indices = kdtree.query_batch(X_test)
query_time = time.time() - start
ground_truth = [(vector, index) for vector, index in zip(X_test, indices)]
return build_time, query_time, ground_truth
# we'll compute the ground truth for the first time and
# store it on disk to prevent computing it over and over again
MODEL_DIR = 'model'
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
ground_truth_filename = 'ground_truth.pkl'
ground_truth_filepath = os.path.join(MODEL_DIR, ground_truth_filename)
print('ground truth filepath: ', ground_truth_filepath)
if os.path.exists(ground_truth_filepath):
ground_truth = load(ground_truth_filepath)
else:
# using a setting of kdtree_params = {'topn': 10, 'n_jobs': -1},
# it took at least 1 hour to finish on a 8 core machine
kdtree_params = {'topn': 10, 'n_jobs': -1}
build_time, query_time, ground_truth = get_ground_truth(X_train, X_test, kdtree_params)
print('build time: ', build_time)
print('query time: ', query_time)
dump(ground_truth, ground_truth_filepath)
ground_truth[0]
"""
Explanation: Benchmarking an approximate nearest neighbor method involves looking at how much faster it is compared to exact nearest neighbor methods, and how much precision/recall we are losing for the speed that was gained. To measure this, we first need to use an exact nearest neighbor method to see how long it takes and to store the ground truth. For example, if our exact nearest neighbor method thinks that for data point 1 its top 3 nearest neighbors excluding itself are [2, 4, 1], and our approximate nearest neighbor method returns [2, 1, 5], then our precision (or recall, depending on which way we're looking at it) would be 66%, since 2 and 1 are both in the ground truth set whereas 5 is not.
End of explanation
"""
class Hnsw:
def __init__(self, space='cosinesimil', index_params=None,
query_params=None, print_progress=True):
self.space = space
self.index_params = index_params
self.query_params = query_params
self.print_progress = print_progress
def fit(self, X):
index_params = self.index_params
if index_params is None:
index_params = {'M': 16, 'post': 0, 'efConstruction': 400}
query_params = self.query_params
if query_params is None:
query_params = {'ef': 90}
# this is the actual nmslib part, hopefully the syntax should
        # be pretty readable; the documentation also has a more verbose
# introduction: https://nmslib.github.io/nmslib/quickstart.html
index = nmslib.init(space=self.space, method='hnsw')
index.addDataPointBatch(X)
index.createIndex(index_params, print_progress=self.print_progress)
index.setQueryTimeParams(query_params)
self.index_ = index
self.index_params_ = index_params
self.query_params_ = query_params
return self
def query(self, vector, topn):
# the knnQuery returns indices and corresponding distance
# we will throw the distance away for now
indices, _ = self.index_.knnQuery(vector, k=topn)
return indices
"""
Explanation: Benchmarking ANN Methods
The library that we'll be leveraging here is nmslib, specifically the algorithm HNSW (Hierarchical Navigable Small World), a graph-based approximate nearest neighbor search method. We will only be using the library and will not introduce the details of the algorithm in this notebook.
End of explanation
"""
index_params = {'M': 5, 'post': 0, 'efConstruction': 100}
start = time.time()
hnsw = Hnsw(index_params=index_params)
hnsw.fit(X_train)
build_time = time.time() - start
build_time
"""
Explanation: Like a lot of machine learning algorithms, HNSW has hyperparameters that we can tune. We will pick an arbitrary setting for now and look at the influence of each hyperparameter in a later section.
End of explanation
"""
topn = 10
query_vector, correct_indices = ground_truth[0]
start = time.time()
# use the query_vector to find its corresponding
# approximate nearest neighbors
found_indices = hnsw.query(query_vector, topn)
query_time = time.time() - start
print('query time:', query_time)
print('correct indices: ', correct_indices)
print('found indices: ', found_indices)
# compute the proportion of data points that overlap between the
# two sets
precision = len(set(found_indices).intersection(correct_indices)) / topn
precision
def run_algo(X_train, X_test, topn, ground_truth, algo_type='hnsw', algo_params=None):
"""
We can extend this benchmark across multiple algorithm or algorithm's hyperparameter
by adding more algo_type options. The algo_params can be a dictionary that is passed
to the algorithm's __init__ method.
Here only 1 method is included.
"""
if algo_type == 'hnsw':
algo = Hnsw()
if algo_params is not None:
algo = Hnsw(**algo_params)
start = time.time()
algo.fit(X_train)
build_time = time.time() - start
total_correct = 0
total_query_time = 0.0
n_queries = len(ground_truth)
for i in trange(n_queries):
query_vector, correct_indices = ground_truth[i]
start = time.time()
found_indices = algo.query(query_vector, topn)
query_time = time.time() - start
total_query_time += query_time
n_correct = len(set(found_indices).intersection(correct_indices))
total_correct += n_correct
avg_query_time = total_query_time / n_queries
avg_precision = total_correct / (n_queries * topn)
return build_time, avg_query_time, avg_precision
"""
Explanation: We'll first use the first element from the ground truth to showcase what we'll be doing, before scaling it up to all the data points.
End of explanation
"""
# we will be running four combinations, higher/lower
# efConstruction/M parameters and comparing the performance
algo_type = 'hnsw'
algo_params = {
'index_params': {'M': 16, 'post': 0, 'efConstruction': 100}
}
build_time1, avg_query_time1, avg_precision1 = run_algo(
X_train, X_test, topn, ground_truth, algo_type, algo_params)
print('build time: ', build_time1)
print('average search time: ', avg_query_time1)
print('average precision: ', avg_precision1)
algo_params = {
'index_params': {'M': 16, 'post': 0, 'efConstruction': 400}
}
build_time2, avg_query_time2, avg_precision2 = run_algo(
X_train, X_test, topn, ground_truth, algo_type, algo_params)
print('build time: ', build_time2)
print('average search time: ', avg_query_time2)
print('average precision: ', avg_precision2)
algo_params = {
'index_params': {'M': 5, 'post': 0, 'efConstruction': 100}
}
build_time3, avg_query_time3, avg_precision3 = run_algo(
X_train, X_test, topn, ground_truth, algo_type, algo_params)
print('build time: ', build_time3)
print('average search time: ', avg_query_time3)
print('average precision: ', avg_precision3)
algo_params = {
'index_params': {'M': 5, 'post': 0, 'efConstruction': 400}
}
build_time4, avg_query_time4, avg_precision4 = run_algo(
X_train, X_test, topn, ground_truth, algo_type, algo_params)
print('build time: ', build_time4)
print('average search time: ', avg_query_time4)
print('average precision: ', avg_precision4)
"""
Explanation: The next few code chunks experiment with different parameters to see which ones work better for this use case.
As recommended by the author of the package, the most influential parameters are M and efConstruction.
efConstruction: Increasing this value improves the quality of the constructed graph and leads to a higher search accuracy, at the cost of longer indexing time. The same idea applies to the ef or efSearch parameter that we can pass to query_params. A reasonable range for this parameter is 100-2000.
M: This parameter controls the maximum number of neighbors for each layer. Increasing the value of this parameter (to a certain degree) leads to better recall and shorter retrieval times, at the expense of longer indexing time. A reasonable range for this parameter is 5-100.
Other parameters include indexThreadQty (we can explicitly set the number of threads) and post. The post parameter controls the amount of post-processing done to the graph; 0 means no post-processing. Additional options are 1 and 2 (2 means more post-processing).
End of explanation
"""
|
wzxiong/DAVIS-Machine-Learning
|
labs/lab3-soln.ipynb
|
mit
|
# %load ../standard_import.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from sklearn.model_selection import LeaveOneOut
from sklearn.linear_model import LinearRegression, lars_path, Lasso, LassoCV
%matplotlib inline
n=100
p=1000
X = np.random.randn(n,p)
X = scale(X)
sprob = 0.02
Sbool = np.random.rand(p) < sprob
s = np.sum(Sbool)
print("Number of non-zero's: {}".format(s))
mu = 100.
beta = np.zeros(p)
beta[Sbool] = mu * np.random.randn(s)
eps = np.random.randn(n)
y = X.dot(beta) + eps
larper = lars_path(X,y,method="lasso")
S = set(np.where(Sbool)[0])
for j in S:
_ = plt.plot(larper[0],larper[2][j,:],'r')
for j in set(range(p)) - S:
_ = plt.plot(larper[0],larper[2][j,:],'k',linewidth=.5)
_ = plt.title('Lasso path for simulated data')
_ = plt.xlabel('lambda')
_ = plt.ylabel('Coef')
"""
Explanation: The Lasso
Modified from the github repo: https://github.com/JWarmenhoven/ISLR-python, which is based on the book by James et al., An Introduction to Statistical Learning.
End of explanation
"""
# In R, I exported the dataset from package 'ISLR' to a csv file.
df = pd.read_csv('../data/Hitters.csv', index_col=0).dropna()
df.index.name = 'Player'
df.info()
df.head()
dummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])
dummies.info()
print(dummies.head())
y = df.Salary
# Drop the column with the independent variable (Salary), and columns for which we created dummy variables
X_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')
# Define the feature set X.
X = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)
X.info()
X.head(5)
"""
Explanation: Hitters dataset
Let's load the dataset from the previous lab.
End of explanation
"""
loo = LeaveOneOut()
looiter = loo.split(X)
hitlasso = LassoCV(cv=looiter)
hitlasso.fit(X,y)
print("The selected lambda value is {:.2f}".format(hitlasso.alpha_))
"""
Explanation: Exercise Compare the previous methods to the Lasso on this dataset. Tune $\lambda$ and compare the LOO risk to other methods (ridge, forward selection, etc.)
The following is a fast implementation of the lasso path cross-validated using LOO.
End of explanation
"""
hitlasso.coef_
np.mean(hitlasso.mse_path_[hitlasso.alphas_ == hitlasso.alpha_])
"""
Explanation: The following is the fitted coefficient vector for this chosen lambda.
End of explanation
"""
bforw = [-0.21830515, 0.38154135, 0. , 0. , 0. ,
0.16139123, 0. , 0. , 0. , 0. ,
0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,
0. , 0. , -0.19429699, 0. ]
print(", ".join(X.columns[(hitlasso.coef_ != 0.) != (bforw != 0.)]))
"""
Explanation: The above is the MSE for the selected model. The best performance for ridge regression was roughly 120,000, so this does not outperform ridge. We can also compare this to the selected model from forward stagewise regression:
[-0.21830515, 0.38154135, 0. , 0. , 0. ,
0.16139123, 0. , 0. , 0. , 0. ,
0.09994524, 0.56696569, -0.16872682, 0.16924078, 0. ,
0. , 0. , -0.19429699, 0. ]
This is not exactly the same model; the two differ in the inclusion or exclusion of AtBat, HmRun, Runs, RBI, Years, CHmRun, Errors, League_N, Division_W, and NewLeague_N.
End of explanation
"""
|
ogaway/Econometrics
|
SimultaneousEquation.ipynb
|
gpl-3.0
|
%matplotlib inline
# -*- coding:utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.regression.gmm import IV2SLS
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# Load the data
data = pd.read_csv('example/k1001.csv')
# Set the explanatory variables for equation 1
X1 = data[['P', 'E']].as_matrix().reshape(-1, 2)
X1 = sm.add_constant(X1)
# Set the explanatory variables for equation 2
X2 = data[['P', 'A']].as_matrix().reshape(-1, 2)
X2 = sm.add_constant(X2)
# Set the dependent variable
Y = data[['Q']].as_matrix().reshape(-1)
# Run OLS (Ordinary Least Squares)
model1 = sm.OLS(Y, X1)
model2 = sm.OLS(Y, X2)
result1 = model1.fit()
result2 = model2.fit()
print(result1.summary())
print(result2.summary())
"""
Explanation: Simultaneous Equation Systems
This notebook implements Chapter 10, "Simultaneous Equation Systems", of 『Rによる計量経済学』 (Econometrics with R) in Python.
Please download the datasets that accompany the textbook (such as "k1001.csv") from the publisher's website.
The explanations below summarize part of the book; please refer to the book for more details.
Example 10.1
We estimate the following supply and demand functions:
$Q_{t} = \alpha_{0} + \alpha_{1} P_{t} + \alpha_{2} E_{t} + u_{t}$
$Q_{t} = \beta_{0} + \beta_{1} P_{t} + \beta_{2} A_{t} + v_{t}$
where $Q_{t}$ is the quantity, $P_{t}$ is the price, $E_{t}$ is a supply-shift factor, and $A_{t}$ is a demand-shift factor.
End of explanation
"""
# Set the exogenous variables (instruments)
inst = data[[ 'A', 'E']].as_matrix()
inst = sm.add_constant(inst)
# Run 2SLS (Two-Stage Least Squares)
model1 = IV2SLS(Y, X1, inst)
model2 = IV2SLS(Y, X2, inst)
result1 = model1.fit()
result2 = model2.fit()
print(result1.summary())
print(result2.summary())
"""
Explanation: Summarizing the equations estimated by ordinary (classical) least squares, we obtain:
[Supply function]
$\hat Q_{i} = 4.8581 + 1.5094 P_{i} - 1.5202 E_{i} $
[Demand function]
$\hat Q_{i} = 16.6747 - 0.9088 P_{i} - 1.0369 A_{i}$
However, because the explanatory variable P is correlated with the error term, simultaneous-equations bias arises.
Therefore, below we re-estimate the system using two-stage least squares (2SLS), a standard estimation method for simultaneous equation systems.
End of explanation
"""
|
eaton-lab/toytree
|
sandbox/quartet-funcs.ipynb
|
bsd-3-clause
|
import toytree
import itertools
import numpy as np
"""
Explanation: toytree quartet functions (in progress)
End of explanation
"""
t0 = toytree.rtree.unittree(10, seed=0)
t1 = toytree.rtree.unittree(10, seed=1)
toytree.mtree([t0, t1]).draw(ts='p', height=200);
"""
Explanation: get two random trees
End of explanation
"""
t0.draw(
ts='p',
node_colors="lightgrey",
edge_widths=3,
edge_colors=t0.get_edge_values_mapped(
{11: 'red', 3: 'pink', 4: 'blue', 18: 'aqua', 12: 'black'},
),
);
"""
Explanation: Plan for counting quartets (Illustrated below)
We will traverse the tree visiting every node in turn. At each node we will select the edge above it (towards the root) to be the focal 'split'. Each split can represent many possible quartets, where one tip is sampled from each of the four edges leading from the split. In the example below, we are visiting node 12, and the focal split is shown in black. The four edges leaving this split are shown in red, pink, blue, and aqua. To get all quartets from this split we must enumerate all possible combinations of one tip from each colored set.
End of explanation
"""
# focal node
nidx = 12
# get all tips as a set
fullset = set(i for i in t0.get_tip_labels())
# get tips from each child of a given node
down0 = set(t0.idx_dict[nidx].children[0].get_leaf_names())
down1 = set(t0.idx_dict[nidx].children[1].get_leaf_names())
up0 = set(t0.idx_dict[nidx].up.get_leaf_names()) - down0 - down1
up1 = fullset - down0 - down1 - up0
print(down0)
print(down1)
print(up0)
print(up1)
"""
Explanation: Example to sample tips from each quartet edge
End of explanation
"""
set(itertools.product(down0, down1, up0, up1))
"""
Explanation: Example to get all quartet sets from sampled tips
End of explanation
"""
def get_quartets(ttre):
# store all quartets in this SET
qset = set([])
# get a SET with all tips in the tree
fullset = set(ttre.get_tip_labels())
# get a SET of the descendants from each internal node
for node in ttre.idx_dict.values():
# skip leaf nodes
if not node.is_leaf():
children = set(node.get_leaf_names())
prod = itertools.product(
itertools.combinations(children, 2),
itertools.combinations(fullset - children, 2),
)
quartets = set([tuple(itertools.chain(*i)) for i in prod])
qset = qset.union(quartets)
# order tups in sets
sorted_set = set()
for qs in qset:
if np.argmin(qs) > 1:
tup = tuple(sorted(qs[2:]) + sorted(qs[:2]))
sorted_set.add(tup)
else:
tup = tuple(sorted(qs[:2]) + sorted(qs[2:]))
sorted_set.add(tup)
return sorted_set
get_quartets(t1)
"""
Explanation: Combine into a function
End of explanation
"""
q0 = get_quartets(t0)
q1 = get_quartets(t1)
# quartets that are in one tree but not the other
q0.symmetric_difference(q1)
"""
Explanation: Compare quartet sets
End of explanation
"""
|
sastels/Onboarding
|
6.5 - Baby names.ipynb
|
mit
|
import sys
import re
"""
Explanation: Baby names
End of explanation
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
# +++your code here+++
return
def baby_names(file_list, summary=False):
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
baby_names(['data/babynames/baby1990.html'])
wordcount('topcount', 'data/wiki.txt')
baby_names(['data/babynames/baby1996.html'], summary=True)
baby_names(['data/babynames/baby2000.html', 'data/babynames/baby2002.html'])
"""
Explanation: Define the extract_names() function below and change baby_names()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
Suggested milestones for incremental development:
* Extract the year and print it
* Extract the names and rank numbers and just print them
* Get the names data into a dict and print it
* Build the [year, 'name rank', ... ] list and print it
* Fix baby_names() to use the extract_names list
End of explanation
"""
|
dhhagan/py-openaq
|
docs/tutorial/delhi.ipynb
|
mit
|
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import openaq
import warnings
warnings.simplefilter('ignore')
%matplotlib inline
# Set major seaborn asthetics
sns.set("notebook", style='ticks', font_scale=1.0)
# Increase the quality of inline plots
mpl.rcParams['figure.dpi']= 500
"""
Explanation: Evaluating Delhi's AQ Using OpenAQ
Most of my own atmospheric chemistry research as a PhD student at MIT is based in Delhi. Thus, for this tutorial, we will take a deeper look at the air quality data made available to us through OpenAQ. We will begin by figuring out exactly what data is available to us, and then further examine the most relevant and up-to-date sources. We will take a look at longer trends for some pollutants where possible.
End of explanation
"""
api = openaq.OpenAQ()
locations = api.locations(city='Delhi', df=True)
locations.location
"""
Explanation: Choosing Locations
First, let's figure out which locations we should use for our analysis. Let's grab all locations from Delhi for all parameters:
End of explanation
"""
locations = locations.query("count > 100").query("lastUpdated >= '2017-03-01'")
locations.location
"""
Explanation: Let's go ahead and filter our results to only grab locations that have been updated in 2017 and have at least 100 data points.
End of explanation
"""
params = []
for i, r in locations.iterrows():
[params.append(x) for x in r.parameters if x not in params]
params
"""
Explanation: Now that we have several up-to-date locations in Delhi we can use, let's see what parameters we have to play with!
End of explanation
"""
|
QuantEcon/QuantEcon.notebooks
|
ddp_ex_career_py.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
import quantecon as qe
from quantecon.markov import DiscreteDP
# matplotlib settings
plt.rcParams['axes.xmargin'] = 0
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['patch.force_edgecolor'] = True
from cycler import cycler
plt.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')
"""
Explanation: DiscreteDP Example: Modeling Career Choice
Daisuke Oyama
Faculty of Economics, University of Tokyo
We use DiscreteDP to solve the carrer-job model considered in
http://quant-econ.net/py/career.html.
End of explanation
"""
# Number of possible realizations for both theta and epsilon
N = 50
# Upper bound for both theta and epsilon
B = 5
theta = np.linspace(0, B, N) # set of theta values
epsilon = np.linspace(0, B, N) # set of epsilon values
# Set of indices for theta-epsilon pairs
s_indices2d = qe.cartesian([np.arange(N), np.arange(N)])
"""
Explanation: Setup
Construct the state space:
End of explanation
"""
print(s_indices2d)
# Number of states
n = N * N
"""
Explanation: States are ordered as follows:
End of explanation
"""
F_a, F_b = 1, 1
F_probs = qe.distributions.BetaBinomial(N-1, F_a, F_b).pdf()
F_mean = np.sum(theta * F_probs)
G_a, G_b = 1, 1
G_probs = qe.distributions.BetaBinomial(N-1, G_a, G_b).pdf()
G_mean = np.sum(epsilon * G_probs)
"""
Explanation: Distributions of theta and epsilon:
End of explanation
"""
# Number of actions; 0: stay put, 1: new job, 2: new life
m = 3
# Reward and transition probability arrays
R = np.empty((n, m))
Q = np.zeros((n, m, n))
# Stay put
R[:, 0] = theta[s_indices2d[:, 0]] + epsilon[s_indices2d[:, 1]]
Q[np.arange(n), 0, np.arange(n)] = 1
# New job
R[:, 1] = theta[s_indices2d[:, 0]] + G_mean
for i in range(N):
Q[i*N:(i+1)*N, 1, i*N:(i+1)*N] = G_probs
# New life
R[:, 2] = F_mean + G_mean
Q[:, 2] = F_probs.reshape(N, 1).dot(G_probs.reshape(1, N)).ravel()
"""
Explanation: Construct the reward array R and the transition probability array Q:
End of explanation
"""
beta = 0.95
"""
Explanation: Discount factor:
End of explanation
"""
ddp = DiscreteDP(R, Q, beta)
"""
Explanation: Create a DiscreteDP instance:
End of explanation
"""
res = ddp.solve()
"""
Explanation: Solving the model
Solve the Markov decision problem:
End of explanation
"""
res.num_iter
"""
Explanation: Number of iterations:
End of explanation
"""
v_2d = res.v.reshape(N, N)
sigma_2d = res.sigma.reshape(N, N)
"""
Explanation: The returned value function res.v and res.sigma are 1-dimenstional arrays.
To convert them to 2-dimensional arrays:
End of explanation
"""
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
tg, eg = np.meshgrid(theta, epsilon)
ax.plot_surface(tg,
eg,
v_2d.T,
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.5,
linewidth=0.25)
ax.set_zlim(150, 200)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.set_zlabel('value', fontsize=14)
ax.view_init(ax.elev, 225)
ax.set_title(r'Optimal value function: $\beta = {0}$'
.format(ddp.beta), y=1.05)
plt.show()
"""
Explanation: Plot the optimal value function:
End of explanation
"""
fig, ax = plt.subplots(figsize=(6, 6))
tg, eg = np.meshgrid(theta, epsilon)
lvls=(-0.5, 0.5, 1.5, 2.5)
ax.contourf(tg, eg, sigma_2d.T, levels=lvls, cmap=cm.winter, alpha=0.5)
ax.contour(tg, eg, sigma_2d.T, colors='k', levels=lvls, linewidths=2)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.text(1.8, 2.5, 'new life', fontsize=14)
ax.text(4.5, 2.5, 'new job', fontsize=14, rotation='vertical')
ax.text(4.0, 4.5, 'stay put', fontsize=14)
ax.set_title(r'Optimal policy function: $\beta = {0}$'.format(ddp.beta))
plt.show()
"""
Explanation: Plot the optimal policy function:
End of explanation
"""
ts_length = 20
seed = 3210 # for replication
# seed = None # to randomly initialize the random number generator
X = res.mc.simulate(ts_length=ts_length, num_reps=2, random_state=seed)
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
for x, ax in zip(X, axes):
theta_path, epsilon_path = theta[s_indices2d[x, 0]], epsilon[s_indices2d[x, 1]]
ax.plot(epsilon_path, label='epsilon')
ax.plot(theta_path, label='theta')
ax.legend(loc='lower right')
ax.set_ylim(0, B)
axes[0].set_title(r'Sample paths: $\beta = {0}$'.format(ddp.beta))
plt.show()
"""
Explanation: Simulate the controlled Markov chain:
End of explanation
"""
M = 25000 # Number of samples
ts_length = 100
seed = 42
X = res.mc.simulate(ts_length=ts_length, init=0, num_reps=M, random_state=seed)
T_stars = (res.sigma[X] != 0).sum(axis=1)
"""
Explanation: Generate sample paths and compute the first passage times for the stay-put region:
End of explanation
"""
all(T_stars < ts_length)
fig, ax = plt.subplots(figsize=(8, 5))
hist = np.histogram(T_stars, bins=T_stars.max(), range=(0, T_stars.max()))
ax.bar(np.arange(T_stars.max()), hist[0], align='center')
ax.set_xlim(0, ts_length)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title(r'First passage time: $\beta = {0}$'.format(ddp.beta))
plt.show()
np.mean(T_stars)
np.median(T_stars)
"""
Explanation: Check that the state enters the stay-put region before ts_length:
End of explanation
"""
ddp.beta = 0.99
res99 = ddp.solve()
res99.num_iter
v99_2d = res99.v.reshape(N, N)
sigma99_2d = res99.sigma.reshape(N, N)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
tg, eg = np.meshgrid(theta, epsilon)
ax.plot_surface(tg,
eg,
v99_2d.T,
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.5,
linewidth=0.25)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.set_zlabel('value', fontsize=14)
ax.view_init(ax.elev, 225)
ax.set_title(r'Optimal value function: $\beta = {0}$'
.format(ddp.beta), y=1.05)
plt.show()
fig, ax = plt.subplots(figsize=(6, 6))
tg, eg = np.meshgrid(theta, epsilon)
lvls=(-0.5, 0.5, 1.5, 2.5)
ax.contourf(tg, eg, sigma99_2d.T, levels=lvls, cmap=cm.winter, alpha=0.5)
ax.contour(tg, eg, sigma99_2d.T, colors='k', levels=lvls, linewidths=2)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.text(1.8, 2.5, 'new life', fontsize=14)
ax.text(4.6, 2.5, 'new job', fontsize=14, rotation='vertical')
ax.text(4.5, 4.7, 'stay put', fontsize=14)
ax.set_title(r'Optimal policy function: $\beta = {0}$'.format(ddp.beta))
plt.show()
ts_length = 20
seed = 3210 # for replication
# seed = None # to randomly initialize the random number generator
X = res99.mc.simulate(ts_length=ts_length, num_reps=2, random_state=seed)
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
for x, ax in zip(X, axes):
theta_path, epsilon_path = theta[s_indices2d[x, 0]], epsilon[s_indices2d[x, 1]]
ax.plot(epsilon_path, label='epsilon')
ax.plot(theta_path, label='theta')
ax.legend(loc='lower right')
ax.set_ylim(0, B)
axes[0].set_title(r'Sample paths: $\beta = {0}$'.format(ddp.beta))
plt.show()
M = 25000 # Number of samples
ts_length = 120
seed = 42 # for replication
# seed = None # to randomly initialize the random number generator
x = res99.mc.simulate(ts_length=ts_length, init=0, num_reps=M, random_state=seed)
T_stars = (res99.sigma[x] != 0).sum(axis=1)
all(T_stars < ts_length)
fig, ax = plt.subplots(figsize=(8, 5))
hist = np.histogram(T_stars, bins=T_stars.max(), range=(0, T_stars.max()))
ax.bar(np.arange(T_stars.max()), hist[0], align='center')
ax.set_xlim(0, ts_length)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title(r'First passage time: $\beta = {0}$'.format(ddp.beta))
plt.show()
np.mean(T_stars)
np.median(T_stars)
"""
Explanation: Increased discount factor
Repeat the above exercises with $\beta = 0.99$.
End of explanation
"""
class CareerWorkerProblemDiscreteDP():
"""
Class to solve the career-job choice model.
Parameters
----------
See `CareerWorkerProblem`.
"""
def __init__(self, B=5.0, beta=0.95, N=50, F_a=1, F_b=1, G_a=1, G_b=1):
self.beta, self.N, self.B = beta, N, B
self.theta = np.linspace(0, B, N) # set of theta values
self.epsilon = np.linspace(0, B, N) # set of epsilon values
self.F_probs = qe.distributions.BetaBinomial(N-1, F_a, F_b).pdf()
self.G_probs = qe.distributions.BetaBinomial(N-1, G_a, G_b).pdf()
self.F_mean = np.sum(self.theta * self.F_probs)
self.G_mean = np.sum(self.epsilon * self.G_probs)
self.s_indices2d = qe.cartesian((np.arange(N), np.arange(N)))
self.s_values2d = qe.cartesian((self.theta, self.epsilon))
n = N * N # Number of states
m = 3 # Number of actions; 0: stay put, 1: new job, 2: new life
# Reward and transition probability arrays
R = np.empty((n, m))
Q = np.zeros((n, m, n))
# Stay put
R[:, 0] = self.s_values2d.sum(axis=-1)
Q[np.arange(n), 0, np.arange(n)] = 1
# New job
R[:, 1] = self.s_values2d[:, 0] + self.G_mean
for i in range(N):
Q[i*N:(i+1)*N, 1, i*N:(i+1)*N] = self.G_probs
# New life
R[:, 2] = self.F_mean + self.G_mean
Q[:, 2] = self.F_probs.reshape(N, 1).dot(self.G_probs.reshape(1, N)).ravel()
self.ddp = DiscreteDP(R, Q, self.beta)
self._mc = None
self.num_iter = None
@property
def mc(self):
if self._mc is None:
self.solve()
return self._mc
def solve(self, *args, **kwargs):
"""
Solve the model.
"""
res = self.ddp.solve(*args, **kwargs)
v = res.v.reshape(self.N, self.N)
sigma = res.sigma.reshape(self.N, self.N)
self._mc = res.mc
self.num_iter = res.num_iter
return v, sigma
def simulate(self, ts_length, init=None, num_reps=None, random_state=None,
ret='state_value'):
"""
Simulate the controlled Markov chain.
"""
if init is not None:
init = init[0]*self.N + init[1]
X = self.mc.simulate(ts_length, init, num_reps, random_state)
if ret == 'state_index':
paths_index = self.s_indices2d[X]
return paths_index
elif ret == 'state_value':
paths_value = self.s_values2d[X]
return paths_value
else:
raise ValueError()
"""
Explanation: Wrapping the procedure in a class
End of explanation
"""
G_a, G_b = 100, 100
wp = CareerWorkerProblemDiscreteDP(G_a=G_a, G_b=G_b)
v, sigma = wp.solve()
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
tg, eg = np.meshgrid(wp.theta, wp.epsilon)
ax.plot_surface(tg,
eg,
v.T,
rstride=2, cstride=2,
cmap=cm.jet,
alpha=0.5,
linewidth=0.25)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.set_zlabel('value', fontsize=14)
ax.view_init(ax.elev, 225)
ax.set_title(r'Optimal value function: '
r'$G_a = {G_a}$, $G_b = {G_b}$, $\beta = {beta}$'
.format(G_a=G_a, G_b=G_b, beta=wp.beta), y=1.05)
plt.show()
fig, ax = plt.subplots(figsize=(6, 6))
tg, eg = np.meshgrid(wp.theta, wp.epsilon)
lvls=(-0.5, 0.5, 1.5, 2.5)
ax.contourf(tg, eg, sigma.T, levels=lvls, cmap=cm.winter, alpha=0.5)
ax.contour(tg, eg, sigma.T, colors='k', levels=lvls, linewidths=2)
ax.set_xlabel('theta', fontsize=14)
ax.set_ylabel('epsilon', fontsize=14)
ax.text(1.8, 2.5, 'new life', fontsize=14)
ax.text(4.5, 2.5, 'new job', fontsize=14, rotation='vertical')
ax.text(4.0, 4.5, 'stay put', fontsize=14)
ax.set_title(r'Optimal policy function: '
r'$G_a = {G_a}$, $G_b = {G_b}$, $\beta = {beta}$'
.format(G_a=G_a, G_b=G_b, beta=wp.beta))
plt.show()
ts_length = 20
seed = 3210 # for replication
# seed = None # to randomly initialize the random number generator
paths = wp.simulate(ts_length=ts_length, num_reps=2, random_state=seed)
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
for path, ax in zip(paths, axes):
theta_path, epsilon_path = path[:, 0], path[:, 1]
ax.plot(epsilon_path, label='epsilon')
ax.plot(theta_path, label='theta')
ax.legend(loc='lower right')
ax.set_ylim(0, wp.B)
axes[0].set_title(r'Sample paths: '
r'$G_a = {G_a}$, $G_b = {G_b}$, $\beta = {beta}$'
.format(G_a=G_a, G_b=G_b, beta=wp.beta))
plt.show()
M = 25000 # Number of samples
ts_length = 100
seed = 42 # for replication
# seed = None # to randomly initialize the random number generator
X = wp.simulate(ts_length=ts_length, init=(0, 0), num_reps=M, random_state=seed,
ret='state_index')
T_stars = (sigma[X[..., 0], X[..., 1]] != 0).sum(axis=1)
all(T_stars < ts_length)
fig, ax = plt.subplots(figsize=(8, 5))
hist = np.histogram(T_stars, bins=T_stars.max(), range=(0, T_stars.max()))
ax.bar(np.arange(T_stars.max()), hist[0], align='center')
ax.set_xlim(0, ts_length)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title(r'First passage time: '
r'$G_a = {G_a}$, $G_b = {G_b}$, $\beta = {beta}$'
.format(G_a=G_a, G_b=G_b, beta=wp.beta))
plt.show()
np.mean(T_stars)
np.median(T_stars)
"""
Explanation: Different set of parameter values
Let G_a = G_b = 100:
End of explanation
"""
# Download the module file and then import it
module = "career"
folder = "career"
object_name = "CareerWorkerProblem"
file = module + ".py"
repo = "https://github.com/QuantEcon/QuantEcon.lectures.code"
qe.util.fetch_nb_dependencies(files=[file], repo=repo, folder=folder)
exec("from {0} import {1}".format(module, object_name))
"""
Explanation: Comparison with CareerWorkerProblem
Let us compare the above CareerWorkerProblemDiscreteDP with the original CareerWorkerProblem:
We need to download the script file career.py from the career directory
in the QuantEcon.lectures.code repo.
End of explanation
"""
wp_orig = CareerWorkerProblem()
wp_ddp = CareerWorkerProblemDiscreteDP()
"""
Explanation: We are now ready to use CareerWorkerProblem.
End of explanation
"""
v_init = np.ones((wp_orig.N, wp_orig.N))*100
v_orig = qe.compute_fixed_point(wp_orig.bellman_operator, v_init, print_skip=25)
sigma_orig = wp_orig.get_greedy(v_orig)
v_init_1d = np.ones(wp_orig.N*wp_orig.N)*100
error_tol = 1e-3
epsilon = error_tol * (2*wp_orig.beta) / (1-wp_orig.beta)
v_ddp, sigma_ddp = \
wp_ddp.solve(method='vi', v_init=v_init_1d, epsilon=epsilon, max_iter=50)
np.abs(v_orig - v_ddp).max()
np.array_equal(sigma_orig-1, sigma_ddp)
"""
Explanation: Solve the model by value iteration with max_iter = 50,
which is the default value in qe.compute_fixed_point:
End of explanation
"""
wp_ddp.num_iter
"""
Explanation: max_iter = 50 is binding:
End of explanation
"""
v_ddp_pi, sigma_ddp_pi = \
wp_ddp.solve(method='pi', v_init=v_init_1d)
np.abs(v_orig - v_ddp_pi).max()
np.array_equal(sigma_orig-1, sigma_ddp_pi)
(sigma_orig-1 != sigma_ddp_pi).sum()
np.where(sigma_orig-1 != sigma_ddp_pi)
"""
Explanation: Solve by policy iteration:
End of explanation
"""
v_ddp2, sigma_ddp2 = \
wp_ddp.solve(method='vi', v_init=v_init_1d, epsilon=epsilon, max_iter=200)
wp_ddp.num_iter
v_init = np.ones((wp_orig.N, wp_orig.N))*100
v_orig2 = qe.compute_fixed_point(wp_orig.bellman_operator, v_init, print_skip=25,
max_iter=200)
sigma_orig2 = wp_orig.get_greedy(v_orig2)
np.array_equal(sigma_orig2-1, sigma_ddp_pi)
"""
Explanation: Increase max_iter:
End of explanation
"""
beta = 0.99
wp_orig99 = CareerWorkerProblem(beta=beta)
wp_ddp99 = CareerWorkerProblemDiscreteDP(beta=beta)
v_init = np.ones((wp_orig99.N, wp_orig99.N))*100
v_orig99 = qe.compute_fixed_point(wp_orig99.bellman_operator, v_init, max_iter=2000, print_skip=100)
sigma_orig99 = wp_orig99.get_greedy(v_orig99)
v_init_1d = np.ones(wp_orig.N*wp_orig.N)*100
error_tol = 1e-3
epsilon = error_tol * (2*wp_orig.beta) / (1-wp_orig.beta)
v_ddp99, sigma_ddp99 = \
wp_ddp99.solve(method='vi', v_init=v_init_1d, epsilon=epsilon, max_iter=2000)
np.array_equal(sigma_orig99-1, sigma_ddp99)
v_ddp99_pi, sigma_ddp99_pi = \
wp_ddp99.solve(method='pi', v_init=v_init_1d)
np.array_equal(sigma_orig99-1, sigma_ddp99_pi)
(sigma_orig99-1 != sigma_ddp99_pi).sum()
"""
Explanation: Consider the case of $\beta = 0.99$.
End of explanation
"""
F = qe.DiscreteRV(wp_orig99.F_probs)
G = qe.DiscreteRV(wp_orig99.G_probs)
def gen_first_passage_time(sigma):
t = 0
i = j = 0
while 1:
if sigma[i, j] == 1: # Stay put
return t
elif sigma[i, j] == 2: # New job
j = int(G.draw())
else: # New life
i, j = int(F.draw()), int(G.draw())
t += 1
M = 25000 # Number of samples
samples = np.empty(M)
for i in range(M):
samples[i] = gen_first_passage_time(sigma=sigma_orig99)
print(np.median(samples))
"""
Explanation: Compute the median first passage time as in the lecture:
End of explanation
"""
M = 25000 # Number of samples
ts_length = 120
seed = 42 # for replication
# seed = None # to randomly initialize the random number generator
X = wp_ddp99.simulate(ts_length=ts_length, init=(0, 0), num_reps=M, random_state=seed,
ret='state_index')
T_stars = (sigma_ddp99[X[..., 0], X[..., 1]] != 0).sum(axis=1)
np.median(T_stars)
"""
Explanation: And by the simulate method of our CareerWorkerProblemDiscreteDP class:
End of explanation
"""
|
flowersteam/explauto
|
notebook/goal_babbling_direct_optimization.ipynb
|
gpl-3.0
|
from explauto import Environment
environment = Environment.from_configuration('simple_arm', 'mid_dimensional')
environment.noise = 0.01
print "Motor bounds", environment.conf.m_bounds
print "Sensory bounds", environment.conf.s_bounds
"""
Explanation: Goal Babbling with direct optimization
In our previous implementations of goal babbling, when a goal is chosen, the robotic agent performs only one movement in order to reach this goal. This movement, however, could be found through optimization of a fast surrogate forward model (or simulations of movements in the head of the robot), e.g. with the Covariance Matrix Adaptation Evolutionary Strategy (CMAES) optimization method, and the Locally Weighted Linear Regression (LWLR) surrogate forward model (the combination of which we call "CMAES-LWLR").
We've shown how to define and use sensorimotor models in this notebook, and summarized the different available sensorimotor models here.
In this tutorial, however, we explain how an agent can explore a sensorimotor environment with multiple exploration steps for one particular goal. A surrogate inverse model is used to find a good motor command to reach the new goal and serves to bootstrap an optimization method that directly executes motor commands and receives feedback from the environment, with a budget of robot experiments for each new goal.
We will use CMAES as the direct optimization method to reach the current goal, and the nearest neighbor (NN) algorithm as a fast surrogate inverse model. We test this idea on a simple 2D simulated 7-DOF robotic arm.
Let's first define the robotic environment:
End of explanation
"""
from explauto import SensorimotorModel
model = SensorimotorModel.from_configuration(environment.conf, 'nearest_neighbor', 'default')
model.mode = "exploit" # We don't want the sensorimotor model to add exploration noise
"""
Explanation: The arm has 7 joints and a total length of 1. The motor bounds here do not allow the joints to rotate all the way around, but only in the range [-60°, 60°], or [-1.05, 1.05] rad. The position of the end-effector provides the variables to optimize; reachable positions roughly cover the intervals X in [-0.5, 1] and Y in [-1, 1].
Now, we define the surrogate model using exact nearest neighbor lookup:
End of explanation
"""
%pylab inline
ax = axes()
plt.axis('equal')
for m in environment.random_motors(n=100):
s = environment.compute_sensori_effect(m)
environment.plot_arm(ax, m, alpha=0.2)
model.update(m, s)
"""
Explanation: Let's have a look at what could be random motor configurations, and at the same time we bootstrap the surrogate model with 100 points:
End of explanation
"""
from explauto.interest_model.random import RandomInterest
im_model = RandomInterest(environment.conf, environment.conf.s_dims)
"""
Explanation: We can see that most random motor commands explore the border of the reachable space on the bottom, right and top regions.
Now we define the interest model, which will choose the goals. We use random goals in this tutorial, but any interest model can be used instead:
End of explanation
"""
import numpy as np
from explauto.sensorimotor_model.inverse.cma import fmin as cma_fmin
%pylab inline
ax = axes()
plt.axis('equal')
n_goals = 50 # Number of goals
cma_maxfevals = 50 # Maximum error function evaluations by CMAES (actually CMAES will slightly overshoot it)
cma_sigma0 = 0.2 # Standard deviation in initial covariance matrix
for i in range(n_goals):
s_g = im_model.sample() # Sample a random goal
m0 = model.inverse_prediction(s_g) # Find the nearest neighbor of s_g and output the corresponding m
def error_f(m_): # Error function corresponding to the new goal s_g.
environment.plot_arm(ax, m_, alpha=0.1)
s_ = environment.compute_sensori_effect(m_) # Execute a motor command
model.update(m_, s_) # Update the surrogate model
return np.linalg.norm(s_ - s_g) # Output the distance between the reached point s_ and the goal s_g
# Call CMAES with the error function for the new goal and use m0 to bootstrap exploration
m = cma_fmin(error_f, m0, cma_sigma0, options={'bounds':[environment.conf.m_mins, environment.conf.m_maxs],
'verb_log':0, # don't flood my output...
'verb_disp':False, # ...seriously
'maxfevals':cma_maxfevals})[0]
s = environment.compute_sensori_effect(m) # Execute best motor command found by CMAES (optional)
model.update(m, s) # Update the surrogate model
plt.plot(s_g[0], s_g[1], "or", alpha=0.8) # Plot goal in red
plt.plot(s[0], s[1], "ob", alpha=0.8) # Plot reached point in blue
print "Goal:", s_g, "Reaching Error:", np.linalg.norm(s_g - s)
"""
Explanation: We have all the building blocks to do goal babbling with direct optimization:
End of explanation
"""
%pylab inline
ax = axes()
plt.axis('equal')
print "# Points in database:", len(model.model.imodel.fmodel.dataset)
for i in range(len(model.model.imodel.fmodel.dataset)):
m = model.model.imodel.fmodel.dataset.get_x(i)
environment.plot_arm(ax, m, alpha=0.01)
"""
Explanation: The red points are the goals, and the blue points are the corresponding best reached points.
For each of the 50 goals, CMAES was given a budget of 50 error-function evaluations, starting from the nearest reached point in the past; as noted above, CMAES slightly overshoots this budget, so roughly 55 points were actually explored per goal.
Let's now plot only the reached points (the 100 motor babbling points + the 50*(55 CMAES + 1 test) = 2900 points):
End of explanation
"""
%pylab inline
ax = axes()
plt.axis('equal')
for m in environment.random_motors(n=len(model.model.imodel.fmodel.dataset)):
environment.plot_arm(ax, m, alpha=0.01)
"""
Explanation: Now we can compare the area explored with Goal Babbling and direct CMAES optimization, versus the area reached only with random motor commands:
End of explanation
"""
|
uliang/First-steps-with-the-Python-language
|
Day 1 - Unit 2.3 Data Manipulations.ipynb
|
mit
|
import numpy as np
import pandas as pd
"""
Explanation: 2.3 Data Manipulations
Content:
- 2.3.1 Groupby: split-apply-combine
- 2.3.2 Merging dataframes
- 2.3.3 Melting dataframes (wide-form to long-form)
- 2.3.4 Exercises
Import libraries
End of explanation
"""
user = pd.read_csv('http://files.grouplens.org/datasets/movielens/ml-100k/u.user',sep='|',header=None)
user.columns = ['user_id','age','gender','occupation','zip_code']
user.tail()
# Any missing values?
user.isnull().sum()
# How many unique occupations are there in the data set?
user['occupation'].unique()
# The number of unique values.
user['occupation'].nunique()
# How many users for each occupation?
user['occupation'].value_counts()
"""
Explanation: In this Unit 2.3, we will be using the MovieLens datasets: http://files.grouplens.org/datasets/movielens/ml-100k/README
Citation: F. Maxwell Harper and Joseph A. Konstan. 2015. The MovieLens Datasets:
History and Context. ACM Transactions on Interactive Intelligent
Systems (TiiS) 5, 4, Article 19 (December 2015), 19 pages.
DOI=http://dx.doi.org/10.1145/2827872
2.3.1 Groupby: split-apply-combine
Import data set from url: http://files.grouplens.org/datasets/movielens/ml-100k/u.user
Description: Demographic information about MovieLens users. This is a pipe-separated list of user_id | age | gender | occupation | zip_code (read here with sep='|').
End of explanation
"""
# How many users for each occupation? Use of groupby method:
user.groupby('occupation').count()
# What is the average age for all users?
user['age'].mean()
# What is the average age for each occupation? Sort by descending order.
user.groupby('occupation')['age'].mean().sort_values(ascending=False)
# Get all summarized statistics about the age for each occupation.
user.groupby('occupation')['age'].describe()
# Apply customised function
# Example: Find the range of age for each occupation
user.groupby('occupation')['age'].apply(lambda x: x.max() - x.min())
# Calculate the mean age for each combination of occupation and gender.
user.groupby(['occupation','gender'])['age'].mean()
# Output is a multi-index series
# Selecting data in a multi-index series
user1 = user.groupby(['occupation','gender'])['age'].mean()
user1['student']['M']
# Unstacking the multi-index series
user.groupby(['occupation','gender'])['age'].mean().unstack()
# Unstacking multi-index series will give a data frame
user2 = user.groupby(['occupation','gender'])['age'].mean().unstack()
user2.loc['student', 'M']
"""
Explanation: The groupby method allows us to group rows of data together and call aggregate functions; a compact named-aggregation variant is shown below.
End of explanation
"""
# How many male and female users?
user.groupby('gender')['age'].count()
# What are the percentages of male and female users?
100*user.groupby('gender')['age'].count()/len(user)
# What are the percentages of male users for each occupation?
# First, create a new column to convert string to numbers/boolean
user['gen'] = user['gender'].map({'M': 1, 'F': 0})
user.head()
user.groupby(['occupation'])['gen'].mean()*100
"""
Explanation: Analysing categorical variables.
End of explanation
"""
# Find the percentages of male and female users for each occupation.
'''users = 100*user.groupby(['occupation','gender'])['gender'].count()/user.groupby(['occupation'])['gender'].count()
users.unstack()'''
table = pd.pivot_table(user, values="gen", index="occupation", columns="gender",
aggfunc="count", margins_name="Total", margins=True)
(table.div(table.Total, axis="rows")*100).applymap(lambda x: round(x,2))
"""
Explanation: Using pivot_table to create a table showing the percentages of male and female users for each occupation (an equivalent crosstab one-liner is shown below)
End of explanation
"""
# Write a function to return a new column with 'Region'
def get_region(x):
x = pd.to_numeric(x, errors='coerce')
if x <= 1:
return 'North'
elif x <= 3:
return 'South'
elif x <= 5:
return 'East'
elif x <= 7:
return 'West'
elif x <= 9:
return 'Central'
else:
return None
user['zip'] = user['zip_code'].str[0]
user['Region'] = user['zip'].apply(get_region)
user.head()
user.groupby(['Region','gender'])['age'].count().unstack()
def get_age_group(x):
if x < 25:
return 'Young Adults'
elif x <= 60:
return 'Adults'
else:
return 'Senior Citizens'
user['age_group'] = user['age'].apply(get_age_group)
user.head()
#user.groupby(['age_group','gender'])['age'].describe().columns
"""
Explanation: The apply method applies a callable to each column/entry of a dataframe/series; a vectorized alternative using map is sketched below.
Suppose that the zip_code can be classified as follows:
First digit of zip_code $\Rightarrow$ Region
| Digit | Region|
|:------:|:----:|
| 0, 1| North|
|2, 3| South|
|4, 5| East|
|6, 7| West|
|8, 9| Central|
End of explanation
"""
data = pd.read_csv('http://files.grouplens.org/datasets/movielens/ml-100k/u.data',sep='\t',header=None)
data.columns = ['user_id','movie_id','rating','timestamp']
data.tail()
"""
Explanation: 2.3.2 Merging dataframes
Import data from url: http://files.grouplens.org/datasets/movielens/ml-100k/u.data
This is the data set of 100,000 ratings by 943 users on 1682 movies. Each user has rated at least 20 movies.
The data is randomly ordered. This is a tab separated list of user_id | movie_id | rating | timestamp.
End of explanation
"""
movie = pd.read_csv('http://files.grouplens.org/datasets/movielens/ml-100k/u.item',sep='|',header=None,encoding='latin-1')
movie = movie.loc[:,0:2]
movie.columns = ['movie_id','movie_title','release_date']
movie.tail()
"""
Explanation: Import data from url: http://files.grouplens.org/datasets/movielens/ml-100k/u.item
This dataset contains information about the movies. There are 24 columns but we will be using the first 3 columns only: movie_id | movie_title | release date
End of explanation
"""
df = pd.merge(data,movie,on='movie_id')
df.tail()
"""
Explanation: Merge two dataframes based on 'movie_id'.
End of explanation
"""
df1 = df.groupby('movie_title')['rating'].agg([np.mean,np.size])
df1.head()
"""
Explanation: Find the average and the number of ratings for all movies.
End of explanation
"""
df1.sort_values(by='size',ascending=False)[:101]["mean"].idxmax()
"""
Explanation: Challenge! Out of the top-100 most rated movies, which movie has the highest average rating?
Challenge!! Can you get the answer in one line of code?
End of explanation
"""
quiz = pd.read_excel('Quiz.xlsx')
quiz.head()
# This data frame is in wide format
# Melt the wide-form df to long-form
quizlong = pd.melt(quiz,id_vars=['ID'],value_vars=['Quiz1','Quiz2','Quiz3','Quiz4','Quiz5'],var_name='Quiz',value_name='Score')
quizlong.head()
# Shape of the long-form df
quizlong.shape
# Change long-form to wide-form by using pivot
quizwide = quizlong.pivot(index='ID',columns='Quiz',values='Score')
quizwide.reset_index().head()
# movie id | movie title | release date | video release date |
# IMDb URL | unknown | Action | Adventure | Animation |
# Children's | Comedy | Crime | Documentary | Drama | Fantasy |
# Film-Noir | Horror | Musical | Mystery | Romance | Sci-Fi |
# Thriller | War | Western |
#mv = pd.read_csv('http://files.grouplens.org/datasets/movielens/ml-100k/u.item',sep='|',header=None,encoding='latin-1')
#mv.tail()
#mv.columns= ['movie id' , 'movie title' , 'release date', 'video release date', 'IMDB URL' , 'Unknown', 'Action',
# 'Adventure' , 'Animation', "Children" , 'Comedy' , 'Crime' , 'Documentary' , 'Drama' , 'Fantasy' ,
# 'Film-Noir' , 'Horror' , 'Musical' , 'Mystery' , 'Romance' , 'Sci-Fi' , 'Thriller' , 'War' , 'Western']
#mv.head()
#mvlong = pd.melt(mv, id_vars=['movie id','movie title','release date','video release date','IMDB URL'],
# value_vars=['Unknown', 'Action', 'Adventure' , 'Animation', "Children" , 'Comedy' , 'Crime' ,
# 'Documentary' , 'Drama' , 'Fantasy' , 'Film-Noir' , 'Horror' , 'Musical' , 'Mystery' ,
# 'Romance' , 'Sci-Fi' , 'Thriller' , 'War' , 'Western'],
# var_name='Genre', value_name='Value')
#mvlong.head(10)
#mvlong.shape
#mv[mv['movie title'] == 'Star Wars (1977)']
#mvlong[mvlong['movie title'] == 'Star Wars (1977)']
# Which is the most popular movie genre?
# mvlong.groupby('Genre')['Value'].sum()
"""
Explanation: 2.3.3 Wide-form vs Long-form Dataframe
End of explanation
"""
df = pd.read_excel('Marks.xlsx')
df.head()
# Insert a new column Total = 0.25*ICA + 0.25*MST + 0.5* Exam
df['Total'] = 0.25*df['ICA']+0.25*df['MST']+0.5*df['Exam']
df.sample(5)
df['Course'] = df['Student_Class'].str.split('/').str[0]
df.sample(5)
df['Year'] = df['Student_Class'].str.split('/').str[2].str[0]
df.sample(5)
school = pd.DataFrame({'Course':['DARE','DASE','DCHE','DAPC','DCPE','DEEE','DME','DMRO','DBEN'],
'School':['MAE','EEE','CLS','CLS','EEE','EEE','MAE','MAE','MAE']})
school
# Merge the two data frames to match Course to School
df1 = pd.merge(df,school,on='Course')
df1.sample(5)
# Basic descriptive stats
df1.describe()
# Basic descriptive stats groupby School & Course
df1.groupby(['School','Course'])['Total'].describe()
# Customising aggregated values
def rates(s, threshold):
return np.sum(s>= threshold)/s.size*100
def PassRate(s):
return rates(s, 45.5)
def BPlusRate(s):
return rates(s, 74.5)
listOfSummaryStatistics = [np.size, np.mean, np.min, np.max, np.std, PassRate, BPlusRate]
columnNames = ["N", "Mean", "Min", "Max", "Standard\nDeviation", "Pass\nRate (%)", "B+ and above\nrate (%)"]
summaryBySubjectClass = df1.groupby(['Subject_Class']).Total.agg(listOfSummaryStatistics)
summaryBySubjectClass.columns = columnNames
summaryBySubjectClass
summaryBySchoolCourse = df1.groupby(['School','Course']).Total.agg(listOfSummaryStatistics)
summaryBySchoolCourse.columns = columnNames
summaryBySchoolCourse
summaryByYear = df1.groupby(['Year']).Total.agg(listOfSummaryStatistics)
summaryByYear.columns = columnNames
summaryByYear
# Compute correlations among the variables
df1.corr()
# Export cleaned data frame to Excel
df1.to_excel('Marks_Clean.xlsx')
#def pass_rate(x):
# return 100*np.mean(x>=50)
#x=pd.Series([1,50,100])
#pass_rate(x)
#def BPlus(x):
# return 100*sum(x>=75)/len(x)
#df1.groupby(['Year'])['Marks'].agg([pass_rate,BPlus]).rename(columns={'pass_rate':'Pass Rate','BPlus':'B+&Above'})
"""
Explanation: 2.3.4 Exercises
End of explanation
"""
|
AllenDowney/ModSimPy
|
soln/chap05soln.ipynb
|
mit
|
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
"""
Explanation: Modeling and Simulation in Python
Chapter 5
Copyright 2017 Allen Downey
License: Creative Commons Attribution 4.0 International
End of explanation
"""
from pandas import read_html
"""
Explanation: Reading data
Pandas is a library that provides tools for reading and processing data. read_html reads a web page from a file or the Internet and creates one DataFrame for each table on the page.
End of explanation
"""
filename = 'data/World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
len(tables)
"""
Explanation: The data directory contains a downloaded copy of https://en.wikipedia.org/wiki/World_population_estimates
The arguments of read_html specify the file to read and how to interpret the tables in the file. The result, tables, is a sequence of DataFrame objects; len(tables) reports the length of the sequence.
End of explanation
"""
table2 = tables[2]
table2.head()
"""
Explanation: We can select the DataFrame we want using the bracket operator. The tables are numbered from 0, so tables[2] is actually the third table on the page.
head selects the header and the first five rows.
End of explanation
"""
table2.tail()
"""
Explanation: tail selects the last five rows.
End of explanation
"""
table2.columns = ['census', 'prb', 'un', 'maddison',
'hyde', 'tanton', 'biraben', 'mj',
'thomlinson', 'durand', 'clark']
"""
Explanation: Long column names are awkward to work with, but we can replace them with abbreviated names.
End of explanation
"""
table2.head()
"""
Explanation: Here's what the DataFrame looks like now.
End of explanation
"""
census = table2.census
census.head()
census.tail()
"""
Explanation: The first column, which is labeled Year, is special. It is the index for this DataFrame, which means it contains the labels for the rows.
Some of the values use scientific notation; for example, 2.544000e+09 is shorthand for $2.544 \cdot 10^9$ or 2.544 billion.
NaN is a special value that indicates missing data.
Series
We can use dot notation to select a column from a DataFrame. The result is a Series, which is like a DataFrame with a single column.
End of explanation
"""
un = table2.un / 1e9
un.head()
census = table2.census / 1e9
census.head()
"""
Explanation: Like a DataFrame, a Series contains an index, which labels the rows.
1e9 is scientific notation for $1 \cdot 10^9$ or 1 billion.
From here on, we will work in units of billions.
End of explanation
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
decorate(xlabel='Year',
ylabel='World population (billion)')
savefig('figs/chap05-fig01.pdf')
"""
Explanation: Here's what these estimates look like.
End of explanation
"""
max(abs(census - un) / un) * 100
"""
Explanation: The following expression computes the elementwise differences between the two series, then divides through by the UN value to produce relative errors, then finds the largest element.
So the largest relative error between the estimates is about 1.3%.
End of explanation
"""
# Solution
census - un
# Solution
abs(census - un)
# Solution
abs(census - un) / un
# Solution
max(abs(census - un) / census) * 100
"""
Explanation: Exercise: Break down that expression into smaller steps and display the intermediate results, to make sure you understand how it works.
Compute the elementwise differences, census - un
Compute the absolute differences, abs(census - un)
Compute the relative differences, abs(census - un) / un
Compute the percent differences, abs(census - un) / un * 100
End of explanation
"""
census[1950]
"""
Explanation: max and abs are built-in functions provided by Python, but NumPy also provides versions that are a little more general. When you import modsim, you get the NumPy versions of these functions.
Constant growth
We can select a value from a Series using bracket notation. Here's the first element:
End of explanation
"""
census[2016]
"""
Explanation: And the last value.
End of explanation
"""
t_0 = get_first_label(census)
t_end = get_last_label(census)
elapsed_time = t_end - t_0
"""
Explanation: But rather than "hard code" those dates, we can get the first and last labels from the Series:
End of explanation
"""
p_0 = get_first_value(census)
p_end = get_last_value(census)
"""
Explanation: And we can get the first and last values:
End of explanation
"""
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
"""
Explanation: Then we can compute the average annual growth in billions of people per year.
End of explanation
"""
results = TimeSeries()
"""
Explanation: TimeSeries
Now let's create a TimeSeries to contain values generated by a linear growth model.
End of explanation
"""
results[t_0] = census[t_0]
results
"""
Explanation: Initially the TimeSeries is empty, but we can initialize it so the starting value, in 1950, is the 1950 population estimated by the US Census.
End of explanation
"""
for t in linrange(t_0, t_end):
results[t+1] = results[t] + annual_growth
"""
Explanation: After that, the population in the model grows by a constant amount each year.
End of explanation
"""
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(results, color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title='Constant growth')
savefig('figs/chap05-fig02.pdf')
"""
Explanation: Here's what the results look like, compared to the actual data.
End of explanation
"""
# Solution
def compute_annual_growth(t_0, t_end):
"""Average annual growth over given period.
t_0: start date
t_end: end_date
returns: average annual growth
"""
elapsed_time = t_end - t_0
p_0 = census[t_0]
p_end = census[t_end]
total_growth = p_end - p_0
annual_growth = total_growth / elapsed_time
return annual_growth
# compute annual growth using data from 1970 to the end
t_0 = 1970
t_end = get_last_label(census)
annual_growth = compute_annual_growth(t_0, t_end)
# Run the simulation over the whole time range.
# I subtract 0.45 from the initial value to shift
# the fitted curve down so it fits the data better.
t_0 = get_first_label(census)
t_end = get_last_label(census)
p_0 = get_first_value(census) - 0.45
# initialize the result
results = TimeSeries()
results[t_0] = p_0
# run the simulation
for t in linrange(t_0, t_end):
results[t+1] = results[t] + annual_growth
# plot the results
plot(census, ':', label='US Census')
plot(un, '--', label='UN DESA')
plot(results, '--', color='gray', label='model')
decorate(xlabel='Year',
ylabel='World population (billion)',
title='Constant growth')
census.loc[1960:1970]
"""
Explanation: The model fits the data pretty well after 1990, but not so well before.
Exercises
Optional Exercise: Try fitting the model using data from 1970 to the present, and see if that does a better job.
Hint:
Copy the code from above and make a few changes. Test your code after each small change.
Make sure your TimeSeries starts in 1950, even though the estimated annual growth is based on later data.
You might want to add a constant to the starting value to match the data better.
End of explanation
"""
|
PMEAL/OpenPNM
|
examples/simulations/percolation/B_invasion_percolation.ipynb
|
mit
|
import sys
import openpnm as op
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
import porespy as ps
from ipywidgets import interact, IntSlider
from openpnm.topotools import trim
%matplotlib inline
ws = op.Workspace()
ws.settings["loglevel"] = 50
"""
Explanation: Invasion Percolation
The next percolation algorithm to be demonstrated is known as Invasion Percolation. Instead of identifying connected clusters and invading them all in one go, as Ordinary Percolation does, this algorithm progresses one invasion step at a time. This is a more dynamic process and better simulates scenarios where, instead of the pressure at the network boundaries, something else such as the mass flow rate is controlled, while the pressure is allowed to fluctuate up and down in order to meet the lowest available entry pressure of the growing cluster(s).
End of explanation
"""
spacing=2.5e-5
net = op.network.Cubic([20, 20, 1], spacing=spacing)
geo = op.geometry.SpheresAndCylinders(network=net, pores=net.Ps, throats=net.Ts)
"""
Explanation: In order to also showcase some other network generation options we first start with a small 2D network with SpheresAndCylinders geometry.
End of explanation
"""
net.labels()
net.num_throats('surface')
trim(network=net, throats=net.throats('surface'))
h = net.check_network_health()
trim(network=net, pores=h['trim_pores'])
"""
Explanation: We then trim all the surface pores to obtain distinct sets of boundary edge pores.
End of explanation
"""
im = op.topotools.generate_voxel_image(net, max_dim=1000)
print(im.shape)
"""
Explanation: Then we use a function from our porespy package to generate a tomography style image of the abstract network providing the number of pixels in each dimension.
End of explanation
"""
fig, ax = plt.subplots(figsize=(5, 5))
ax.imshow(im[25:-25, 25:-25, 18].T)
"""
Explanation: This creates a 3D image but we can crop it to get the central slice in 2D for visualization.
End of explanation
"""
crop = im[25:-25, 25:-25, :]
snow_out = ps.networks.snow2(crop > 0, voxel_size=4e-7)
print(snow_out.regions.shape)
"""
Explanation: Next, the SNOW algorithm is used to perform network extraction on the tomography-style image. Of course, if you have your own tomography image, it can be used instead.
End of explanation
"""
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots(figsize=(5, 5))
reg = snow_out.regions.astype(float) - 1
reg[reg == -1] = np.nan
region_slice = snow_out.regions[:, :, 18] - 1
mask = region_slice >= 0
ax.imshow(region_slice.T);
"""
Explanation: The SNOW algorithm provides a labelled region image containing the pore indices. As zero is used for the background, the labels are actually the pore index + 1, because Python indexes arrays from zero and we do not explicitly store the pore index.
End of explanation
"""
wrk = op.Workspace()
wrk.clear()
net, geo = op.io.PoreSpy.import_data(snow_out.network)
"""
Explanation: Now that our new network is extracted, we can fill a network object with all the properties and begin the simulation.
End of explanation
"""
def update_image(data):
data = data.astype(float)
out_im = np.ones(region_slice.shape, dtype=float)*-1
out_im[mask] = data[region_slice[mask]]
out_im[~mask] = np.nan
return out_im
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots(figsize=(5, 5))
out = update_image(net['pore.diameter'])
ax.imshow(out.T);
"""
Explanation: A helper function is defined for plotting a particular data set.
End of explanation
"""
water = op.phases.Water(network=net)
phys = op.physics.Basic(network=net, geometry=geo, phase=water)
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots(figsize=[5, 5])
ax.hist(phys['throat.entry_pressure'], bins=10);
"""
Explanation: Again, standard physics is used to define the capillary entry pressures, and these are shown as a histogram for all the throats in the network.
End of explanation
"""
# NBVAL_IGNORE_OUTPUT
alg_ip = op.algorithms.InvasionPercolation(network=net, phase=water)
alg_ip.set_inlets(pores=net.pores('xmin'))
alg_ip.run()
# NBVAL_IGNORE_OUTPUT
fig, ax = plt.subplots(figsize=(5, 5))
out = update_image(alg_ip['pore.invasion_sequence'])
plt.imshow(out.T);
def plot_invasion(seq):
data = alg_ip['pore.invasion_sequence'] < seq
fig, ax = plt.subplots(figsize=(5, 5))
out = update_image(data)
plt.imshow(out.T);
"""
Explanation: Next, the algorithm is defined and run with no arguments or outlets defined. It proceeds step by step, assessing which pores are currently invaded (i.e. the inlets first), which throats connect to an uninvaded pore and, of these, which throat has the lowest capillary entry pressure. Invasion then proceeds along the path of least capillary resistance; a toy sketch of this selection loop follows below.
End of explanation
"""
max_seq = alg_ip['pore.invasion_sequence'].max()
interact(plot_invasion, seq=IntSlider(min=0, max=max_seq, step=1, value=200));
"""
Explanation: Using the slider below we can interactively plot the saturation at each invasion step (this works best using the left and right arrow keys).
End of explanation
"""
fig, ax = plt.subplots(figsize=(5, 5))
alg_ip.plot_intrusion_curve(ax)
"""
Explanation: As with Ordinary Percolation, we can plot a drainage or intrusion curve, but this time the capillary pressure is plotted from one step to the next as a continuous process with dynamic pressure boundary conditions, and so it is allowed to increase and decrease to meet the next lowest entry pressure for the invading cluster.
End of explanation
"""
fig, ax = plt.subplots(figsize=(5, 5))
alg_op = op.algorithms.OrdinaryPercolation(network=net, phase=water)
alg_op.set_inlets(net.pores('xmin'))
alg_op.settings._update({'pore_volume': 'pore.volume',
'throat_volume': 'throat.volume'})
alg_op.run(points=1000)
alg_op.plot_intrusion_curve(ax)
alg_ip.plot_intrusion_curve(ax)
"""
Explanation: We can compare the results of the two algorithms and see that the pressure envelope, i.e. the maximum pressure reached historically by the invasion process, is the same as the Ordinary Percolation value; a rough by-hand construction of this envelope is sketched below.
End of explanation
"""
alg_ip_t = op.algorithms.InvasionPercolation(network=net, phase=water)
alg_ip_t.set_inlets(pores=net.pores('xmin'))
alg_ip_t.run()
alg_ip_t.apply_trapping(outlets=net.pores(['boundary']))
fig, ax = plt.subplots(figsize=(5, 5))
out = update_image(alg_ip_t['pore.trapped'])
ax.imshow(out.T);
"""
Explanation: An additional feature of the algorithm is the ability to identify where the defending phase becomes trapped. Whether this happens in reality in fact depends on the connectivity of the defending phase and whether it can reside in the invaded pores as thin wetting films. If not, then the defending phase is completely pushed out of a pore when it is invaded, and it can become isolated and trapped when encircled by the invading phase. OpenPNM calculates this trapping as a post-process, employing some clever logic described by Masson (2016); a conceptual sketch of the idea follows below.
End of explanation
"""
fig, ax = plt.subplots(figsize=(5, 5))
alg_ip.plot_intrusion_curve(ax)
alg_ip_t.plot_intrusion_curve(ax)
"""
Explanation: Here a reasonable fraction of the pore space is not invaded, due to trapping of the defending phase. Generally this fraction will be lower in truly 3D networks, as there are more routes out of the network because pores have higher connectivity. Also, if the defending phase is considered to be wetting, film flow is typically assumed to allow the residual defending phase to escape. However, we can show the differences, with and without trapping, on one plot below.
End of explanation
"""
|
ChristopherHogan/cython
|
docs/src/quickstart/cython_in_jupyter.ipynb
|
apache-2.0
|
%load_ext cython
"""
Explanation: Installation
pip install cython
Using inside Jupyter notebook
Load the Cython magic extension.
End of explanation
"""
%%cython
cdef int a = 0
for i in range(10):
a += i
print(a)
"""
Explanation: Then, simply use the %%cython cell magic to start writing Cython code.
End of explanation
"""
%%cython --annotate
cdef int a = 0
for i in range(10):
a += i
print(a)
"""
Explanation: Add --annotate (or -a) to show a line-by-line analysis of the compiled code.
End of explanation
"""
|
wanderer2/pymc3
|
docs/source/notebooks/lda-advi-aevb.ipynb
|
apache-2.0
|
%matplotlib inline
import sys, os
import theano
theano.config.floatX = 'float64'
from collections import OrderedDict
from copy import deepcopy
import numpy as np
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.datasets import fetch_20newsgroups
import matplotlib.pyplot as plt
import seaborn as sns
from theano import shared
import theano.tensor as tt
from theano.sandbox.rng_mrg import MRG_RandomStreams
import pymc3 as pm
from pymc3 import Dirichlet
from pymc3.distributions.transforms import t_stick_breaking
from pymc3.variational.advi import advi, sample_vp
"""
Explanation: Automatic autoencoding variational Bayes for latent dirichlet allocation with PyMC3
For probabilistic models with latent variables, autoencoding variational Bayes (AEVB; Kingma and Welling, 2014) is an algorithm which allows us to perform inference efficiently for large datasets with an encoder. In AEVB, the encoder is used to infer variational parameters of approximate posterior on latent variables from given samples. By using tunable and flexible encoders such as multilayer perceptrons (MLPs), AEVB approximates complex variational posterior based on mean-field approximation, which does not utilize analytic representations of the true posterior. Combining AEVB with ADVI (Kucukelbir et al., 2015), we can perform posterior inference on almost arbitrary probabilistic models involving continuous latent variables.
I have implemented AEVB for ADVI with mini-batch on PyMC3. To demonstrate flexibility of this approach, we will apply this to latent dirichlet allocation (LDA; Blei et al., 2003) for modeling documents. In the LDA model, each document is assumed to be generated from a multinomial distribution, whose parameters are treated as latent variables. By using AEVB with an MLP as an encoder, we will fit the LDA model to the 20-newsgroups dataset.
In this example, the topics extracted by AEVB seem to be qualitatively comparable to those obtained with a standard LDA implementation, i.e., online VB implemented in scikit-learn. Unfortunately, the predictive accuracy on unseen words is lower than with the standard implementation of LDA; this might be due to the mean-field approximation. However, the combination of AEVB and ADVI allows us to quickly apply more complex probabilistic models than LDA to big data with the help of mini-batches. I hope this notebook will attract readers, especially practitioners working on a variety of machine learning tasks, to probabilistic programming and PyMC3.
End of explanation
"""
# The number of words in the vocabulary
n_words = 1000
print("Loading dataset...")
t0 = time()
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data
print("done in %0.3fs." % (time() - t0))
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_words,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
feature_names = tf_vectorizer.get_feature_names()
print("done in %0.3fs." % (time() - t0))
"""
Explanation: Dataset
Here, we will use the 20-newsgroups dataset. This dataset can be obtained by using functions of scikit-learn. The code below is partially adapted from a scikit-learn example (http://scikit-learn.org/stable/auto_examples/applications/topics_extraction_with_nmf_lda.html). We set the number of words in the vocabulary to 1000.
End of explanation
"""
plt.plot(tf[:10, :].toarray().T);
"""
Explanation: Each document is represented by a 1000-dimensional term-frequency vector. Let's check the data.
End of explanation
"""
n_samples_tr = 10000
n_samples_te = tf.shape[0] - n_samples_tr
docs_tr = tf[:n_samples_tr, :]
docs_te = tf[n_samples_tr:, :]
print('Number of docs for training = {}'.format(docs_tr.shape[0]))
print('Number of docs for test = {}'.format(docs_te.shape[0]))
n_tokens = np.sum(docs_tr[docs_tr.nonzero()])
print('Number of tokens in training set = {}'.format(n_tokens))
print('Sparsity = {}'.format(
len(docs_tr.nonzero()[0]) / float(docs_tr.shape[0] * docs_tr.shape[1])))
"""
Explanation: We split the documents into training and test sets. The number of tokens in the training set is 480K. The sparsity of the term-frequency document matrix is 0.025%, which implies that almost all components of the term-frequency matrix are zero.
End of explanation
"""
def logp_lda_doc(beta, theta):
"""Returns the log-likelihood function for given documents.
K : number of topics in the model
V : number of words (size of vocabulary)
D : number of documents (in a mini-batch)
Parameters
----------
beta : tensor (K x V)
Word distributions.
theta : tensor (D x K)
Topic distributions for documents.
"""
def ll_docs_f(docs):
dixs, vixs = docs.nonzero()
vfreqs = docs[dixs, vixs]
ll_docs = vfreqs * pm.math.logsumexp(
tt.log(theta[dixs]) + tt.log(beta.T[vixs]), axis=1).ravel()
# Per-word log-likelihood times num of tokens in the whole dataset
return tt.sum(ll_docs) / tt.sum(vfreqs) * n_tokens
return ll_docs_f
"""
Explanation: Log-likelihood of documents for LDA
For a document $d$ consisting of tokens $w$, the log-likelihood of the LDA model with $K$ topics is given as
\begin{eqnarray}
\log p\left(d|\theta_{d},\beta\right) & = & \sum_{w\in d}\log\left[\sum_{k=1}^{K}\exp\left(\log\theta_{d,k} + \log \beta_{k,w}\right)\right]+const,
\end{eqnarray}
where $\theta_{d}$ is the topic distribution for document $d$ and $\beta$ is the word distribution for the $K$ topics. We define a function that returns a tensor of the log-likelihood of documents given $\theta_{d}$ and $\beta$.
End of explanation
"""
n_topics = 10
minibatch_size = 128
# Tensor for documents
doc_t = shared(np.zeros((minibatch_size, n_words)), name='doc_t')
with pm.Model() as model:
theta = Dirichlet('theta', a=(1.0 / n_topics) * np.ones((minibatch_size, n_topics)),
shape=(minibatch_size, n_topics), transform=t_stick_breaking(1e-9))
beta = Dirichlet('beta', a=(1.0 / n_topics) * np.ones((n_topics, n_words)),
shape=(n_topics, n_words), transform=t_stick_breaking(1e-9))
doc = pm.DensityDist('doc', logp_lda_doc(beta, theta), observed=doc_t)
"""
Explanation: In the inner function, the log-likelihood is scaled for mini-batches by the number of tokens in the dataset.
LDA model
With the log-likelihood function, we can construct the probabilistic model for LDA. doc_t works as a placeholder to which documents in a mini-batch are set.
For ADVI, each of the random variables $\theta$ and $\beta$, drawn from Dirichlet distributions, is transformed into unconstrained real coordinate space. To do this, by default, PyMC3 uses a centered stick-breaking transformation. Since these random variables are on a simplex, the dimension of the unconstrained coordinate space is the original dimension minus 1. For example, the dimension of $\theta_{d}$ is the number of topics (n_topics) in the LDA model, thus the transformed space has dimension (n_topics - 1). It should be noted that, in this example, we use t_stick_breaking, which is a numerically stable version of the stick_breaking transform used by default. This is required for ADVI to work on the LDA model.
The variational posterior on these transformed parameters is represented by spherical Gaussian distributions (the mean-field approximation). Thus, the number of variational parameters of $\theta_{d}$, the latent variable for each document, is 2 * (n_topics - 1) for means and standard deviations.
In the last line of the below cell, DensityDist class is used to define the log-likelihood function of the model. The second argument is a Python function which takes observations (a document matrix in this example) and returns the log-likelihood value. This function is given as a return value of logp_lda_doc(beta, theta), which has been defined above.
End of explanation
"""
def create_minibatch(data):
rng = np.random.RandomState(0)
while True:
# Return random data samples of a size 'minibatch_size' at each iteration
ixs = rng.randint(data.shape[0], size=minibatch_size)
yield [data[ixs]]
minibatches = create_minibatch(docs_tr.toarray())
"""
Explanation: Mini-batch
To perform ADVI with stochastic variational inference for large datasets, the whole set of training samples is split into mini-batches. PyMC3's ADVI function accepts a Python generator which sends a list of mini-batches to the algorithm. Here is an example of how to make such a generator.
TODO: replace the code using the new interface
End of explanation
"""
# The value of doc_t will be replaced with mini-batches
minibatch_tensors = [doc_t]
"""
Explanation: The ADVI function replaces the values of Theano tensors with samples given by the generator. We need to specify those tensors as a list, whose order should be the same as that of the mini-batches sent from the generator. Note that doc_t was used in the model creation as the observation of the random variable named doc.
End of explanation
"""
# observed_RVs = OrderedDict([(doc, n_samples_tr / minibatch_size)])
observed_RVs = OrderedDict([(doc, 1)])
"""
Explanation: To tell the algorithm that the random variable doc is observed, we need to pass it as an OrderedDict. The key of the OrderedDict is an observed random variable and the value is a scalar representing the scaling factor. Since the likelihood of the documents in mini-batches has already been scaled in the likelihood function, we set the scaling factor to 1.
End of explanation
"""
class LDAEncoder:
"""Encode (term-frequency) document vectors to variational means and (log-transformed) stds.
"""
def __init__(self, n_words, n_hidden, n_topics, p_corruption=0, random_seed=1):
rng = np.random.RandomState(random_seed)
self.n_words = n_words
self.n_hidden = n_hidden
self.n_topics = n_topics
self.w0 = shared(0.01 * rng.randn(n_words, n_hidden).ravel(), name='w0')
self.b0 = shared(0.01 * rng.randn(n_hidden), name='b0')
self.w1 = shared(0.01 * rng.randn(n_hidden, 2 * (n_topics - 1)).ravel(), name='w1')
self.b1 = shared(0.01 * rng.randn(2 * (n_topics - 1)), name='b1')
self.rng = MRG_RandomStreams(seed=random_seed)
self.p_corruption = p_corruption
def encode(self, xs):
if 0 < self.p_corruption:
dixs, vixs = xs.nonzero()
mask = tt.set_subtensor(
tt.zeros_like(xs)[dixs, vixs],
self.rng.binomial(size=dixs.shape, n=1, p=1-self.p_corruption)
)
xs_ = xs * mask
else:
xs_ = xs
w0 = self.w0.reshape((self.n_words, self.n_hidden))
w1 = self.w1.reshape((self.n_hidden, 2 * (self.n_topics - 1)))
hs = tt.tanh(xs_.dot(w0) + self.b0)
zs = hs.dot(w1) + self.b1
zs_mean = zs[:, :(self.n_topics - 1)]
zs_std = zs[:, (self.n_topics - 1):]
return zs_mean, zs_std
def get_params(self):
return [self.w0, self.b0, self.w1, self.b1]
"""
Explanation: Encoder
Given a document, the encoder calculates variational parameters of the (transformed) latent variables, more specifically, parameters of Gaussian distributions in the unconstrained real coordinate space. The encode() method is required to output variational means and stds as a tuple, as shown in the following code. As explained above, the number of variational parameters is 2 * (n_topics - 1). Specifically, the shape of zs_mean (or zs_std) in the method is (minibatch_size, n_topics - 1). It should be noted that zs_std is defined as a log-transformed standard deviation, which is automatically exponentiated (and thus bounded to be positive) in advi_minibatch(), the estimation function.
To enhance the generalization ability to unseen words, a Bernoulli corruption process is applied to the input documents. Unfortunately, I have never seen any significant improvement with this.
End of explanation
"""
encoder = LDAEncoder(n_words=n_words, n_hidden=100, n_topics=n_topics, p_corruption=0.0)
local_RVs = OrderedDict([(theta, (encoder.encode(doc_t), n_samples_tr / minibatch_size))])
"""
Explanation: To feed the output of the encoder to the variational parameters of $\theta$, we set an OrderedDict of tuples as below.
End of explanation
"""
encoder_params = encoder.get_params()
"""
Explanation: theta is the random variable defined in the model creation and is a key of an entry of the OrderedDict. The value (encoder.encode(doc_t), n_samples_tr / minibatch_size) is a tuple of a theano expression and a scalar. The theano expression encoder.encode(doc_t) is the output of the encoder given inputs (documents). The scalar n_samples_tr / minibatch_size specifies the scaling factor for mini-batches.
ADVI optimizes the parameters of the encoder. They are passed to the function for ADVI.
End of explanation
"""
def run_advi():
with model:
v_params = pm.variational.advi_minibatch(
n=3000, minibatch_tensors=minibatch_tensors, minibatches=minibatches,
local_RVs=local_RVs, observed_RVs=observed_RVs, encoder_params=encoder_params,
learning_rate=2e-2, epsilon=0.1, n_mcsamples=1
)
return v_params
%time v_params = run_advi()
plt.plot(v_params.elbo_vals)
"""
Explanation: AEVB with ADVI
advi_minibatch() can be used to run AEVB with ADVI on the LDA model.
End of explanation
"""
def print_top_words(beta, feature_names, n_top_words=10):
for i in range(len(beta)):
print(("Topic #%d: " % i) + " ".join([feature_names[j]
for j in beta[i].argsort()[:-n_top_words - 1:-1]]))
doc_t.set_value(docs_te.toarray()[:minibatch_size, :])
with model:
samples = sample_vp(v_params, draws=100, local_RVs=local_RVs)
beta_pymc3 = samples['beta'].mean(axis=0)
print_top_words(beta_pymc3, feature_names)
"""
Explanation: We can see that the ELBO increases as optimization proceeds. The trace of the ELBO looks jagged because the documents in the mini-batch are replaced at each iteration.
Extraction of characteristic words of topics based on posterior samples
Using the estimated variational parameters, we can draw samples from the variational posterior. To do this, we use the function sample_vp(). Here we use it to obtain the posterior mean of the word-topic distribution $\beta$ and show the top-10 most frequent words in each of the 10 topics.
End of explanation
"""
from sklearn.decomposition import LatentDirichletAllocation
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
%time lda.fit(docs_tr)
beta_sklearn = lda.components_ / lda.components_.sum(axis=1)[:, np.newaxis]
print_top_words(beta_sklearn, feature_names)
"""
Explanation: We compare these topics to those obtained by a standard LDA implementation in scikit-learn, which is based on online stochastic variational inference (Hoffman et al., 2013). We can see that the estimated words in the topics are qualitatively similar.
End of explanation
"""
def calc_pp(ws, thetas, beta, wix):
"""
Parameters
----------
ws: ndarray (N,)
Number of times the held-out word appeared in N documents.
thetas: ndarray, shape=(N, K)
Topic distributions for N documents.
beta: ndarray, shape=(K, V)
Word distributions for K topics.
wix: int
Index of the held-out word
Return
------
Log probability of held-out words.
"""
return ws * np.log(thetas.dot(beta[:, wix]))
def eval_lda(transform, beta, docs_te, wixs):
"""Evaluate LDA model by log predictive probability.
Parameters
----------
transform: Python function
Transform document vectors to posterior mean of topic proportions.
wixs: iterable of int
Word indices to be held-out.
"""
lpss = []
docs_ = deepcopy(docs_te)
thetass = []
wss = []
total_words = 0
for wix in wixs:
ws = docs_te[:, wix].ravel()
if 0 < ws.sum():
# Hold-out
docs_[:, wix] = 0
# Topic distributions
thetas = transform(docs_)
# Predictive log probability
lpss.append(calc_pp(ws, thetas, beta, wix))
docs_[:, wix] = ws
thetass.append(thetas)
wss.append(ws)
total_words += ws.sum()
else:
thetass.append(None)
wss.append(None)
# Log-probability
lp = np.sum(np.hstack(lpss)) / total_words
return {
'lp': lp,
'thetass': thetass,
'beta': beta,
'wss': wss
}
"""
Explanation: Predictive distribution
In some papers (e.g., Hoffman et al. 2013), the predictive distribution of held-out words was proposed as a quantitative measure of model fit. The log-likelihood for tokens of a held-out word can be calculated with the posterior means of $\theta$ and $\beta$. The validity of this approach is explained in Hoffman et al. (2013).
End of explanation
"""
n_docs_te = docs_te.shape[0]
doc_t = shared(docs_te.toarray(), name='doc_t')
with pm.Model() as model:
theta = Dirichlet('theta', a=(1.0 / n_topics) * np.ones((n_docs_te, n_topics)),
shape=(n_docs_te, n_topics), transform=t_stick_breaking(1e-9))
beta = Dirichlet('beta', a=(1.0 / n_topics) * np.ones((n_topics, n_words)),
shape=(n_topics, n_words), transform=t_stick_breaking(1e-9))
doc = pm.DensityDist('doc', logp_lda_doc(beta, theta), observed=doc_t)
# Encoder has already been trained
encoder.p_corruption = 0
local_RVs = OrderedDict([(theta, (encoder.encode(doc_t), 1))])
"""
Explanation: To apply the above function to the LDA model, we redefine the probabilistic model, because the number of documents to be tested changes. Since the variational parameters have already been obtained, we can reuse them for sampling from the approximate posterior distribution.
End of explanation
"""
def transform_pymc3(docs):
with model:
doc_t.set_value(docs)
samples = sample_vp(v_params, draws=100, local_RVs=local_RVs)
return samples['theta'].mean(axis=0)
"""
Explanation: The transform() function is defined using sample_vp(). It is passed as an argument to the function that calculates the log predictive probabilities.
End of explanation
"""
%time result_pymc3 = eval_lda(transform_pymc3, beta_pymc3, docs_te.toarray(), np.arange(100))
print('Predictive log prob (pm3) = {}'.format(result_pymc3['lp']))
"""
Explanation: The mean of the log predictive probability is about -7.00.
End of explanation
"""
def transform_sklearn(docs):
thetas = lda.transform(docs)
return thetas / thetas.sum(axis=1)[:, np.newaxis]
%time result_sklearn = eval_lda(transform_sklearn, beta_sklearn, docs_te.toarray(), np.arange(100))
print('Predictive log prob (sklearn) = {}'.format(result_sklearn['lp']))
"""
Explanation: We compare the result with the scikit-learn LDA implementation. Its log predictive probability (-6.04) is significantly higher than that of AEVB-ADVI, even though it shows similar words in the estimated topics. This may be because the mean-field approximation to distributions on the simplex (topic and/or word distributions) is less accurate. See https://gist.github.com/taku-y/f724392bc0ad633deac45ffa135414d3.
End of explanation
"""
|
prk327/CoAca
|
Investment Case Group Project/1_Data_Cleaning.ipynb
|
gpl-3.0
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# reading data files
# using encoding = "ISO-8859-1" to avoid pandas encoding error
rounds = pd.read_csv("rounds2.csv", encoding = "ISO-8859-1")
companies = pd.read_csv("companies.txt", sep="\t", encoding = "ISO-8859-1")
# Look at rounds head
print(rounds.head())
# inspect the structure etc.
print(rounds.info(), "\n")
print(rounds.shape)
"""
Explanation: Part 1: Data Cleaning
Let's start by reading the data files rounds2.csv and companies.txt.
End of explanation
"""
# look at companies head
companies.head()
# companies structure
companies.info()
"""
Explanation: The variables funding_round_code and raised_amount_usd contain some missing values, as shown above. We'll deal with them after we're done with understanding the data - column names, primary keys of tables etc.
End of explanation
"""
# identify the unique number of permalinks in companies
len(companies.permalink.unique())
"""
Explanation: Ideally, the permalink column in the companies dataframe should be the unique_key of the table, having 66368 unique company names (links, or permalinks). Also, these 66368 companies should be present in the rounds file.
Let's first confirm that these 66368 permalinks (which are the URL paths of companies' websites) are not repeating in the column, i.e. they are unique.
End of explanation
"""
# converting all permalinks to lowercase
companies['permalink'] = companies['permalink'].str.lower()
companies.head()
# look at unique values again
len(companies.permalink.unique())
"""
Explanation: Also, let's convert all the entries to lowercase (or uppercase) for uniformity.
End of explanation
"""
# look at unique company names in rounds df
# note that the column name in rounds file is different (company_permalink)
len(rounds.company_permalink.unique())
"""
Explanation: Thus, there are 66368 unique companies in the table and permalink is the unique primary key. Each row represents a unique company.
Let's now check whether all of these 66368 companies are present in the rounds file, and if some extra ones are present.
End of explanation
"""
# converting column to lowercase
rounds['company_permalink'] = rounds['company_permalink'].str.lower()
rounds.head()
# Look at unique values again
len(rounds.company_permalink.unique())
"""
Explanation: There seem to be 90247 unique values of company_permalink, whereas we expected only 66368. Maybe this is because of uppercase/lowercase issues.
Let's convert the column to lowercase and look at unique values again.
End of explanation
"""
# companies present in rounds file but not in (~) companies file
rounds.loc[~rounds['company_permalink'].isin(companies['permalink']), :]
"""
Explanation: There seem to be 2 extra permalinks in the rounds file which are not present in the companies file. Let's hope that this is a data quality issue, since if this were genuine, we would have two companies whose investment round details are available but whose metadata (company name, sector, etc.) is missing from the companies table.
Let's have a look at the company permalinks which are in the 'rounds' file but not in 'companies'.
End of explanation
"""
# looking at the indices with weird characters
rounds_original = pd.read_csv("rounds2.csv", encoding = "ISO-8859-1")
rounds_original.iloc[[29597, 31863, 45176, 58473], :]
"""
Explanation: All the permalinks have weird non-English characters. Let's see whether these characters are present in the original df as well.
End of explanation
"""
# import chardet
# rawdata = open('rounds2.csv', 'rb').read()
# result = chardet.detect(rawdata)
# charenc = result['encoding']
# print(charenc)
# print(result)
"""
Explanation: The weird characters appear when you import the data file. To confirm whether these characters are actually present in the given data or whether Python has introduced them while importing into pandas, let's have a look at the original CSV file in Excel.
The figure below shows the filtered rows - they have the usual English characters.
<img src="./weird_names.PNG">
Thus, this is most likely a data quality issue we have introduced while reading the data file into Python. Specifically, it is most likely caused by the file encoding.
First, let's try to figure out the encoding type of this file. Then we can try specifying the encoding type at the time of reading the file. The chardet library shows the encoding type of a file.
End of explanation
"""
# trying different encodings
# encoding="cp1254" throws an error
# rounds_original = pd.read_csv("rounds2.csv", encoding="cp1254")
# rounds_original.iloc[[29597, 31863, 45176], :]
"""
Explanation: Now let's try telling pandas (at the time of importing) the encoding type. Here's a list of various encoding types python can handle: https://docs.python.org/2/library/codecs.html#standard-encodings.
End of explanation
"""
rounds['company_permalink'] = rounds.company_permalink.str.encode('utf-8').str.decode('ascii', 'ignore')
rounds.loc[~rounds['company_permalink'].isin(companies['permalink']), :]
"""
Explanation: Apparently, pandas cannot decode "cp1254" in this case. After searching a lot on Stack Overflow and Google, the best conclusion that can be drawn is that this file is encoded using multiple encoding types (maybe because the company_permalink column contains names of companies in various countries, and hence various languages).
After trying various other encoding types (in vain), this answer suggested an alternate (and a more intelligent) way: https://stackoverflow.com/questions/45871731/removing-special-characters-in-a-pandas-dataframe.
End of explanation
"""
# Look at unique values again
len(rounds.company_permalink.unique())
"""
Explanation: This seems to work fine.
Let's now look at the number of unique values in the rounds dataframe again.
End of explanation
"""
# companies present in companies df but not in rounds df
companies.loc[~companies['permalink'].isin(rounds['company_permalink']), :]
"""
Explanation: Now it makes sense - there are 66368 unique companies in both the rounds and companies dataframes.
It is possible that a similar encoding problems are present in the companies file as well. Let's look at the companies which are present in the companies file but not in the rounds file - if these have special characters, then it is most likely because the companies file is encoded (while rounds is not).
End of explanation
"""
# remove encoding from companies df
companies['permalink'] = companies.permalink.str.encode('utf-8').str.decode('ascii', 'ignore')
"""
Explanation: Thus, the companies df also contains special characters. Let's treat those as well.
End of explanation
"""
# companies present in companies df but not in rounds df
companies.loc[~companies['permalink'].isin(rounds['company_permalink']), :]
"""
Explanation: Let's now look at the companies present in the companies df but not in the rounds df - ideally there should be none.
End of explanation
"""
# write rounds file
rounds.to_csv("rounds_clean.csv", sep=',', index=False)
# write companies file
companies.to_csv("companies_clean.csv", sep='\t', index=False)
"""
Explanation: Thus, the encoding issue seems resolved now. Let's write these (clean) dataframes into separate files so we don't have to worry about encoding problems again.
End of explanation
"""
|
graphistry/pygraphistry
|
demos/more_examples/graphistry_features/encodings-icons.ipynb
|
bsd-3-clause
|
# ! pip install --user graphistry
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
graphistry.__version__
import datetime, pandas as pd
e_df = pd.DataFrame({
's': ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'd', 'e'],
'd': ['b', 'c', 'a', 'b', 'c', 'a', 'c', 'e', 'd'],
'time': [datetime.datetime(1987, 10, 1), datetime.datetime(1987, 10, 2), datetime.datetime(1987, 10, 3),
datetime.datetime(1988, 10, 1), datetime.datetime(1988, 10, 2), datetime.datetime(1988, 10, 3),
datetime.datetime(1989, 10, 1), datetime.datetime(1989, 10, 2), datetime.datetime(1989, 10, 3)]
})
n_df = pd.DataFrame({
'n': ['a', 'b', 'c', 'd', 'e'],
'score': [ 1, 30, 50, 70, 90 ],
'palette_color_int32': pd.Series(
[0, 1, 2, 3, 4],
dtype='int32'),
'hex_color_int64': pd.Series(
[0xFF000000, 0xFFFF0000, 0xFFFFFF00, 0x00FF0000, 0x0000FF00],
dtype='int64'),
'type': ['mac', 'macbook', 'mac', 'macbook', 'sheep'],
'assorted': ['Canada', 'mac', 'macbook', 'embedded_smile', 'external_logo'],
'origin': ['Canada', 'England', 'Russia', 'Mexico', 'China']
})
g = graphistry.edges(e_df, 's', 'd').nodes(n_df, 'n')
"""
Explanation: Icons encodings tutorial
See the examples below for common ways to map data to node icon in Graphistry.
You can add a main icon. The glyph system supports text, icons, flags, and images, as well as multiple mapping and style controls. When used with column type, the icon will also appear in the legend.
Icons are often used with node color, label, size, and badges to provide more visual information. Most encodings work for both points and edges. The PyGraphistry Python client makes it easier to use the URL settings API and the REST upload API. For dynamic control, you can also use the JavaScript APIs.
Setup
Mode api=3 is recommended. It is required for complex_encodings (ex: .encode_point_size(...)). Mode api=1 works with the simpler .bind(point_size='col_a') form.
End of explanation
"""
g.encode_point_icon(
'assorted',
shape="circle", #clip excess
categorical_mapping={
'macbook': 'laptop', #https://fontawesome.com/v4.7.0/icons/
        'Canada': 'flag-icon-ca', # ISO 3166-1 alpha-2: https://github.com/datasets/country-codes/blob/master/data/country-codes.csv
'embedded_smile': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAARkAAAC0CAMAAACXO6ihAAAAgVBMVEUAAAD////T09P4+PiYmJihoaHf39/7+/vm5uYLCwsRERHy8vJ+fn7v7+83Nze2trbZ2dlRUVFGRka9vb1xcXEYGBhbW1tWVlbq6uq7u7usrKxgYGA8PDwWFhbNzc14eHhpaWmoqKiPj48vLy8oKCggICCIiIiUlJRBQUFKSkokJCTGfDo0AAAOxklEQVR4nO1d6YKyOgxFNkFEUFwQFFEUdd7/AS9L06ZlkZm5I37i+acUSA9dkjRNJal/JPNYjfxRBlvW1guzb3leBPupJo8wLGfat0wvgaOrjEQo0b1vsfrGTKvSUiLtW7RekaRNvGRQV32L1x9it5mX0Wh86lu+3uCM24jJEPYtYT+468Kg6xuGz3cuu28Ze0FqYA7c4/Wwud3WUy3ihpq+pewBB9yTnPWMXjivfURYjxL2hIDV3j+KFx3Wwwan8TFixl5VpdtvswuWHEXGKOhBuD4xZS0mntRcPxvRYT1Lkvt0OSwLiql38qy+xP65Ar0K5nRWCgas5dZgQvWYtkEkdDzPG5gSTGeeoHkMmRDzWx+SFhx3aDGhRbXg+GmC9Y0E+pKbNJa5IfXYf6Js/eIEVW4mRuIMhKH0pztM2MvmMjtMzEh+nnC9QiP1jb6ayzgcM0PpTsSOVA4tZfQhMgNmQWsX4dwTI/1ZsvUL8G7GbYV8jplhuGjupNJKayl+7en8JNn6hUpq67WWWmJitk8SrWfArPNAR0Hdybg9R7KecQYVbtFebsuI2TxHsr5xh1mnvSEwTc9qUZTfCiHRZqwGf1WJxB7YvCQhM7utzczQ1BQ/SbDesezCDLYN5k8TrWdQZprNyT0mRm/tde8E2psaF9hmnJbX4vR7M2zo2NrQnea8MTmYARi1iKj2uhBMMx7QAiVbmnSqHSXmXHkZjAEFpF1ZtR3hUlgNpqlvWO+JBLle3DW6EMqWyMuwmOEdmXYabubzTZjy3ciFYXg4A3AOMWBRqUQwOmfCjPLA7nwzpNVOwyOV9p3cW+8HtZWXKFN0DqQZGX2L+mRMtGZe3CI4GqZ2rW9Rn45rIy+lN6aj4+8dsa6Lj9bjS3k1AUV5MOYkRhjhNaWxLmtsyfLuyZHjOMdTXZjaAHBPtSCSM3ialg5ren4M85xk+FY82uoTvVaDS1z4MOy2FfE3xuoWp6qqaiVU7TQNb+V4k1ATQhvcALRfOrJsWLzJrdiGHMmOGiKf1qDiGs93zX+0pYfCH4zjXIqvxmM+EIYy1MRRjW+mFcOwG9Z6y07KBoj+wHfEWvT9ftpMgUnw3X5U4u3XFMSNlARjX/e2GVyjibe4b8n/GBVHja9H14Wgxu3n6dbV+SQA2/deutxvBVq2aksU0QkpO9Z7O272HsdLdHigvSGXznuPMgmek6wILXObl2S2u1VtatAFlfh5UvaAL0QMzhoyWV4D2bdqx1g1shU7Ct7c04e6ko5jaWZkQcWu61tmuAzfPTgYbdbmd1LeYdhp2a/xzqBbnEa2EHwFoUeVve3DwJkGQlfmX2hMw1rRpmCjb0WBIZxZLdvC3hg0WK9mmCVX3PdWchuwAp1NqTaMBTDTg1z9YwomUI0rAaIBhuB/qYCaS1bNRVi3fW/DqAFzaDJx9doKvBLPFuolcABdrmYXytJqbk7vD+gwdRrLEfTip0v1CiCVrwv1ncGsNZzlJASTVL5uVzIoOv7u6WK9AEJS+7otpmCAD9NogtDOmqEE9gi2bv9/XwTN7QLMKXlwoQ4FnMY2swQf+EDtbND/K1sK6MQ0TGWG7RfURbcl3ZAwqOAYhITUfywkjKZB98ZgI/GAAZdbA6AG+JsvJrWBLmVvGTUTlRIzSP9DCZax04XmsWZrLNaAI4MXLL5BMZw4uasyS3YwTMcMoG0Xz7Dz+M/qg2by+Sq49C1cv9g0MeMMnBghWRNrMQOeligOdpUYf7iKDMbGE4lxBpK16SH2sY7Cy8bRepCLkg0w1Uh2DUOPInWgScfbcFkshpLi64MPPvjggw8++OCDDz744IMPPvjggw8++OCDDz744IM/wWTF0FrwHERe/ByZXgNbxQa07vc6Fou90ZunPsDAqVZaikGgljycEJtuzFxo7OdwNnJ3Y+ZOywzlQFMuqUgLMwdaZiCHU2ZII5ZmsLnUENuMeTlPrYfMnCkzncaZxNd14w12dpgddmlD0KPcab9Gse/lDbIRdmHGTIssI1634HtlQMxI0nyapnE3beY4GhQz3bH2/4iZr83ykGE5b0m2tciKLDeVeMxbXNwZtpwceAmLW/EmUp6ZW/72uGP0XhIWL7zj0WdGdqq2MbMp7ur2DoDmRb4yHo8VxZA9Jy7/3J1UQP7PMtKzIooRBZiCdeDp9ji/1dY9TyV1ozeq+W6LtRPZ5a0aqztm5hS5+dtt2UPxwXP2EBX3p+lWtgtZ9chLIT50Cfsxo2txg5Y9aEZv13KJr5FR3nXMnhayZ5Paxuwfuj/6HAjB3YpV2HAHtvPGkxKPRfJaNF717gtZfi3jlL13z8rakrlFZey4yswdJVpU2L5lljAMn5sR2viN4zJboWFXkg1P8Z6qUcrtCIlMrIST8H6UuA30SvQA9MopOxwng7fBebEV0ozPlZjwHFnd9uyXfRMyasOWHcrMkk/tGpktzHzVvPEq3WqkCHlmQvzxZU4JJ8ygw7fK04C+6s/IUXhmZG4PDmHmLFduy6FxzCjiYSqQzYkyI+5G0JqZ2dclXD6yFFoIc44Zh/s83ZhpODxIYEZIuV8yU3eAzEhkpgr9xjNTefWlkZna87GOtbvKFhwzfGfrxAzK/qfngJx3OTPLmr0l5cWCGfb9jOJWlMhqX+n2mNpTlRnbMFDXbWQGfupemgYukfUo1bRda4cNUgEyx0N1nMmZgaxkI8Ur9zLeSH1zZmZTVcPtwgucUpiCGdqijFMh9+VIap/3hvSqoa/iBupJY0T6e4EZVwtXyXIL7BHz8a4GHrxjxnGllTNAaBBmrk4QwOPdoEA+myWnq4Y+rr0NiBmbvWCtasAnYSZk/+TMgPhjmpVBZcwUwzP9lO5yBc05Z+YMnPmgIEBRMk7MqEhBkVjmwtrAjGcmIolnoHI6NQZWEb4DCthwOSDM5ICxRtBnWEeL5hM4d7ikHjbpsa1nS3YeGx2xWSYLjWdmDhOHXogfUmbW8MYY7hSYgWQ8LBXGkfuHMqNDGtMV+QOlYoQvxzFDk0tgZsD4FJiZU2L2rHZyMf1Bf2HMEDU6ZwYYRcf5NTFTDg4kw86KfWD2XIGZCSnAdvXPYYLQOWbYzlvSndC5txwzE+jpcLVoAYrahRmSKrQrMzTJHXpcAzNEmjg/C0bPVY4xX66ZGZSmCOppccyw63I3ZmgrXJ7SdLrcdWHGK4emoJC/bGVtzNygM6Gd5A3MkL69uu1myfnC3G0oOchjZo7kL9+sZ8ZrZ4bNQV41JdYDZkj1z4X8q4fMwG0+8n80MCOm1A
Q1CLkhHzMD05ny9TNm0HQnT2OenW7MYLQxA6OzjFa6OjJTHWY6MLODrjn/GTP8vmY3wPt2/19mYKbDSa47MgMNGyUueMzMhPReZfMzZiTh8AwF7Wl+OWbQCNWBGTjZ9qfMzCrnihggwKswQ6eJ7zFj/JIZaVMxtuzwT5lBvrS/ZEaC3nT7KTPSLhUPT9P/ghkYgW3kAezGjPkjZjaQ1HX/Y2YyxWGq85au8wfMUNUZHSB/7MQM9UCgxImPmYHvXShHP2VGyt2w24iZ5tHld8ywI35iG5jZgXBo+oOD0Tsyg5IqPmYGTP8mHbgrMxnOMVVuSgfqz5mJqLIKpzkb0oQTtcSoGzNgV3xH07tAF4x+xswsiFzbkol/3gT3y/jwO2Ys8MpT+Qx05DWsBl9gNn7EDPjLdDatpcg/U8sM9STFP2NmWrzAoCsXnE71fWZuY3y/hNybGTN3egZQWnS2TQSlHzFzgQ/GVD3hTPUKM3TUHp1/yEzRKHXKDLxwipkRcqa2MJNQD1Ph9dgwp17uuaLCjg1HDVC6MmAmVOqZAVt75MKMP0fezlpmqBOp/KcmL3uVGei0CWKG2ktwgE+MmZH5lUT4uy4zEm0jlh5F1M9LmBH1bYGZVQKi6bNFZqSix1L3s15M+UlIbzUWixliZuQu57fkNqcj5jiviXmmaQazJxcN9rIA/4x6u+XzjXmhp96Gi90KBrKg/BYJ+AijYuWTvj/YTcxZKBf0r2jCVG+XyS+sjx+azpUVPOQ8Cr9a6jE3sG3rMjb+Z+z8mKPKXKgF/PwYkAn6w5DRqoa3ynh12A3ZkzMLc+NQp29+HPA1q6vj0TZs2caJDvGuli87UtFKfyTVQEa+4+TX8sMdPSrk2PZdWWg4l8oajWIxZqRjPTN+puKY4rKJhXOoNyzHlJjwzIgPMYUFtWVlwcRB00MJD/mkMZSyCZ3FaqbUe0ph88xIc6HR2Gn5KUpmpBR1sNHYIaNBwYy4WsGdFSPWjlvUMRuZGRceCIf/s5YZoTlvG5gBLVxcdHKllVjeF0eaBVfCvUkcM9LX1iXc+d4SmoIfPmKm0hhdJFsjM+Vw+UNmasYFdkbWTmjhVmw+ZEa60cVLK8o68IpnJnvoIb2q6ikNWSepbTMWfyLBmavfdodWvpp6k1vaIaZwKGUXZjzpXl3UjpBds+HFle8TkUmxNxV3pUXERxrn4/gejzMiyKxu5A7QwJU5iO5Xc+HB4qMem9KcFsQjcHSkAhpLmB007slupoXG/D/XPPCD+ydTnFaLqz5mDUGJQi6J2OVAFzrHbnyRJrz0svvwEA6y6kxUlFVyx1YJId7tmrjsNj0etWVN9jekz2w0x3GC6f+TNvqyWJ6O2TsPdYFM5iF7VXDqnDv2Mptj0cnUn1kv5tVxPJmLFCUf+PcBuHX+mZfCwgmcyPXx9zqWIh/pvIxsDVhZrCSZ/zZenhnSQHDnIHPKoY4Zzhb5Ff4VZlDsIKz6bGqY2YDW+fsDK/4VZtisAiamsaPMGNQBDbP7/3CWXfKPMEPlm4ORlk9hVP93r/E6VJlj/rdHKd3mB1CtDe00fclza8EGVaJ0vY6P1BQoViMqlhGB89scrOIDXzELcFhXcxhbGpjRf33GlPjEVzyBqIGZclauZ8b/fdv/d5nZksqLMdI5L9v/4a3/AjMLt+q2c5nSG6p8qGykrlse1hniG19xnMksMy3CLcOmmwNKmLv5Oj7k0frTeN22H+M72PBYv+LclONrsQnjqVpW/kbV4f8AHefeSC51gZgAAAAASUVORK5CYII=',
'external_logo': 'https://awsmp-logos.s3.amazonaws.com/4675c3b9-6053-4a8c-8619-6519b83bbbfd/536ec8b5c79de08fcac1086fdf74f91b.png'
},
default_mapping="question").plot()
"""
Explanation: Icons as categorical mappings + glyph types
The most common form is mapping distinct values to icons.
Graphistry supports built-in + custom glyphs:
Built-in general glyphs: Use values from Font Awesome 4 (e.g. laptop) or, more explicitly, the fa-prefixed form (e.g. fa-laptop)
Built-in flag icons: Use ISO 3166-1 alpha-2 country codes
Custom image URL
Custom image data URI (embedded)
End of explanation
"""
g.encode_point_icon(
'score',
as_text=True,
continuous_binning=[
[33, 'low'],
[66, 'mid'],
[200, 'high']
]).plot()
"""
Explanation: Icons as continuous mappings and text
You can also use value ranges to pick the glyph, and use text as the glyph
End of explanation
"""
g.encode_point_icon(
'score',
as_text=True,
continuous_binning=[
[33, 'low'],
[66, 'mid'],
[None, 'high']
],
default_mapping='?'
).plot()
"""
Explanation: Special continuous bins
For values bigger than the last bin, use None
For nulls, use the default mapping
End of explanation
"""
codes = pd.read_csv('https://raw.githubusercontent.com/datasets/country-codes/master/data/country-codes.csv')
codes.columns
country_to_iso_flag = {
o['CLDR display name']: 'flag-icon-' + o['ISO3166-1-Alpha-2'].lower()
for o in codes[['CLDR display name', 'ISO3166-1-Alpha-2']].dropna().to_dict('records')
}
g.encode_point_icon(
'origin',
shape="circle",
categorical_mapping=country_to_iso_flag,
default_mapping="question").plot()
"""
Explanation: Flag inference
The below code generates ISO3166 mappings from different conventions to Alpha-2
End of explanation
"""
|
yuhao0531/dmc
|
notebooks/week-3/01-basic ann.ipynb
|
apache-2.0
|
%matplotlib inline
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set(style="ticks", color_codes=True)
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
"""
Explanation: Lab 3 - Basic Artificial Neural Network
In this lab we will build a very rudimentary Artificial Neural Network (ANN) and use it to solve some basic classification problems. This example is implemented with only basic math and linear algebra functions using Python's scientific computing library numpy. This will allow us to study how each aspect of the network works, and to gain an intuitive understanding of its functions. In future labs we will use higher-level libraries such as Keras and Tensorflow which automate and optimize most of these functions, making the network much faster and easier to use.
The code and MNIST test data is taken directly from http://neuralnetworksanddeeplearning.com/ by Michael Nielsen. Please review the first chapter of the book for a thorough explanation of the code.
First we import the Python libraries we will be using, including the random library for generating random numbers, numpy for scientific computing, matplotlib and seaborn for creating data visualizations, and several helpful modules from the sci-kit learn machine learning library:
End of explanation
"""
class Network(object):
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs for later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward (self, a):
"""Return the output of the network if "a" is input. The np.dot()
function computes the matrix multiplication between the weight and input
matrices for each set of layers. When used with numpy arrays, the '+'
operator performs matrix addition."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The "training_data" is a list of tuples
"(x, y)" representing the training inputs and the desired
outputs. The other non-optional parameters specify the number
of epochs, size of each mini-batch, and the learning rate.
If "test_data" is provided then the network will be evaluated
against the test data after each epoch, and partial progress
printed out. This is useful for tracking progress, but slows
things down substantially."""
# create an empty array to store the accuracy results from each epoch
results = []
n = len(training_data)
if test_data:
n_test = len(test_data)
# this is the code for one training step, done once for each epoch
for j in xrange(epochs):
# before each epoch, the data is randomly shuffled
random.shuffle(training_data)
# training data is broken up into individual mini-batches
mini_batches = [ training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size) ]
# then each mini-batch is used to update the parameters of the
# network using backpropagation and the specified learning rate
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
# if a test data set is provided, the accuracy results
# are displayed and stored in the 'results' array
if test_data:
num_correct = self.evaluate(test_data)
accuracy = "%.2f" % (100 * (float(num_correct) / n_test))
print "Epoch", j, ":", num_correct, "/", n_test, "-", accuracy, "% acc"
results.append(accuracy)
else:
print "Epoch", j, "complete"
return results
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The "mini_batch" is a list of tuples "(x, y)", and "eta"
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
"""Note that the variable l in the loop below is used a little
differently to the notation in Chapter 2 of the book. Here,
l = 1 means the last layer of neurons, l = 2 is the
second-last layer, and so on. It's a renumbering of the
scheme in the book, used here to take advantage of the fact
that Python can use negative indices in lists."""
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation.
Numpy's argmax() function returns the position of the
largest element in an array. We first create a list of
predicted value and target value pairs, and then count
the number of times those values match to get the total
number correct."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
"""
Explanation: Next, we will build the artificial neural network by defining a new class called Network. This class will contain all the data for our neural network, as well as all the methods we need to compute activations between each layer, and train the network through backpropagation and stochastic gradient descent (SGD).
End of explanation
"""
def sigmoid(z):
# The sigmoid activation function.
return 1.0/(1.0 + np.exp(-z))
def sigmoid_prime(z):
# Derivative of the sigmoid function.
return sigmoid(z)*(1-sigmoid(z))
"""
Explanation: Finally, we define two helper functions which compute the sigmoid activation function and its derivative, which is used in backpropagation.
End of explanation
"""
iris_data = sns.load_dataset("iris")
# randomly shuffle data
iris_data = shuffle(iris_data)
# print first 5 data points
print iris_data[:5]
# create pairplot of iris data
g = sns.pairplot(iris_data, hue="species")
"""
Explanation: Iris dataset example
Now we will test our basic artificial neural network on a very simple classification problem. First we will use the seaborn data visualization library to load the 'iris' dataset,
which consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor), with four features measuring the length and the width of each flower's sepals and petals. After we load the data we will vizualize it using a pairwise plot using a buit-in function in seaborn. A pairwise plot is a kind of exploratory data analysis that helps us to find relationships between pairs of features within a multi-dimensional data set. In this case, we can use it to understand which features might be most useful for determining the species of the flower.
End of explanation
"""
# convert iris data to numpy format
iris_array = iris_data.as_matrix()
# split data into feature and target sets
X = iris_array[:, :4].astype(float)
y = iris_array[:, -1]
# normalize the data per feature by dividing by the maximum value in each column
X = X / X.max(axis=0)
# convert the textual category data to integer using numpy's unique() function
_, y = np.unique(y, return_inverse=True)
# convert the list of targets to a vertical matrix with the dimensions [1 x number of samples]
# this is necessary for later computation
y = y.reshape(-1,1)
# combine feature and target data into a new python array
data = []
for i in range(X.shape[0]):
data.append(tuple([X[i].reshape(-1,1), y[i][0]]))
# split data into training and test sets
trainingSplit = int(.7 * len(data))
training_data = data[:trainingSplit]
test_data = data[trainingSplit:]
# create an instance of the one-hot encoding function from the sci-kit learn library
enc = OneHotEncoder()
# use the function to figure out how many categories exist in the data
enc.fit(y)
# convert only the target data in the training set to one-hot encoding
training_data = [[_x, enc.transform(_y.reshape(-1,1)).toarray().reshape(-1,1)] for _x, _y in training_data]
# define the network
net = Network([4, 32, 3])
# train the network using SGD, and output the results
results = net.SGD(training_data, 30, 10, 0.2, test_data=test_data)
# visualize the results
plt.plot(results)
plt.ylabel('accuracy (%)')
plt.ylim([0,100.0])
plt.show()
"""
Explanation: Next, we will prepare the data set for training in our ANN. Here is a list of operations we need to perform on the data set so that it will work with the Network class we created above:
Convert data to numpy format
Normalize the data so that each feature is scaled from 0 to 1
Split data into feature and target data sets by extracting specific columns from the numpy array. In this case the features are in the first four columns, and the target is in the last column, which in Python we can access with a negative index
Recombine the data into a single Python array, so that each entry in the array represents one sample, and each sample is composed of two numpy arrays, one for the feature data, and one for the target
Split this data set into training and testing sets
Finally, we also need to convert the targets of the training set to 'one-hot' encoding (OHE). OHE takes each piece of categorical data and converts it to a list of binary values whose length equals the number of categories, with a '1' in the position of the current category and '0' everywhere else. For example, in our dataset we have 3 possible categories, which np.unique() orders alphabetically: setosa, versicolor, and virginica. After applying OHE, setosa becomes [1,0,0], versicolor becomes [0,1,0], and virginica becomes [0,0,1]. OHE is often used to represent target data in neural networks because it allows easy comparison to the output coming from the network's final layer.
End of explanation
"""
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
"""
Explanation: MNIST dataset example
Next, we will test our ANN on another, slightly more difficult classification problem. The data set we'll be using is called MNIST, which contains tens of thousands of scanned images of handwritten digits, classified according to the digit type from 0-9. The name MNIST comes from the fact that it is a Modified (M) version of a dataset originally developed by the United States' National Institute of Standards and Technology (NIST). This is a very popular dataset used to measure the effectiveness of Machine Learning models for image recognition. This time we don't have to do as much data management since the data is already provided in the right format here.
We will get into more details about working with images and proper data formats for image data in later labs, but you can already use this data to test the effectiveness of our network. With the default settings you should be able to get a classification accuracy of 95% in the test set.
note: since this is a much larger data set than the Iris data, the training will take substantially more time.
End of explanation
"""
img = training_data[0][0][:,0].reshape((28,28))
fig = plt.figure()
plt.imshow(img, interpolation='nearest', vmin = 0, vmax = 1, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
net = Network([784, 30, 10])
results = net.SGD(training_data, 30, 10, 3.0, test_data=test_data)
plt.plot(results)
plt.ylabel('accuracy (%)')
plt.ylim([0,100.0])
plt.show()
"""
Explanation: We can use the matplotlib library to visualize one of the training images. In the data set, the pixel values of each 28x28 pixel image are encoded in a flat list of 784 numbers, so before we visualize it we have to use numpy's reshape function to convert it back to a 2d matrix form.
End of explanation
"""
%matplotlib inline
import random
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set(style="ticks", color_codes=True)
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
class Network(object):
def __init__(self, sizes):
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward (self, a):
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
results = []
n = len(training_data)
if test_data:
n_test = len(test_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [ training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size) ]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
if test_data:
num_correct = self.evaluate(test_data)
accuracy = "%.2f" % (100 * (float(num_correct) / n_test))
print "Epoch", j, ":", num_correct, "/", n_test, "-", accuracy, "% acc"
results.append(accuracy)
else:
print "Epoch", j, "complete"
return results
def update_mini_batch(self, mini_batch, eta):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
activation = x
activations = [x]
zs = []
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
return (output_activations-y)
def sigmoid(z):
return 1.0/(1.0 + np.exp(-z))
def sigmoid_prime(z):
return sigmoid(z)*(1-sigmoid(z))
wine_data = np.loadtxt(open("./data/wine.csv","rb"),delimiter=",")
wine_data = shuffle(wine_data)
X = wine_data[:,1:]
y = wine_data[:, 0]
X = X / X.max(axis=0)
_, y = np.unique(y, return_inverse=True)
y = y.reshape(-1,1)
data = []
for i in range(X.shape[0]):
data.append(tuple([X[i].reshape(-1,1), y[i][0]]))
trainingSplit = int(.8 * len(data))
training_data = data[:trainingSplit]
test_data = data[trainingSplit:]
enc = OneHotEncoder()
enc.fit(y)
training_data = [[_x, enc.transform(_y.reshape(-1,1)).toarray().reshape(-1,1)] for _x, _y in training_data]
net = Network([13, 30, 3])
results = net.SGD(training_data, 30, 2, 1.5, test_data=test_data)
plt.plot(results)
plt.ylabel('accuracy (%)')
plt.ylim([0,100.0])
plt.show()
"""
Explanation: Assignment 3 - classification
Now that you have a basic understanding of how an artificial neural network works and have seen it applied to a classification task using two types of data, see if you can use the network to solve another classification problem using another data set.
In the week-3 folder there is a data set called wine.csv which is another common data set used to test classification capabilities of machine learning algorithms. You can find a description of the data set here:
https://archive.ics.uci.edu/ml/datasets/Wine
The code below uses numpy to import this .csv file as a 2d numpy array. As before, we first shuffle the data set, and then split it into feature and target sets. This time, the target is in the first column of the data, with the rest of the columns representing the 13 features.
From there you should be able to go through and format the data set in a similar way to what we did for the Iris data above. Remember to split the data into both training and test sets, and encode the training targets as one-hot vectors. When you create the network, make sure to specify the proper dimensions for the input and output layer so that they match the number of features and target categories in the data set. You can also experiment with different sizes for the hidden layer. If you are not achieving good results, try changing some of the hyper-parameters, including the size and quantity of hidden layers in the network specification, and the number of epochs, the size of a mini-batch, and the learning rate in the SGD function call. With a training/test split of 80/20 you should be able to achieve 100% accuracy within 30 epochs.
Remember to commit your changes and submit a pull request when you are done.
Hint: do not be fooled by the category labels that come with this data set! Even though the labels are already integers (1,2,3) we need to always make sure that our category labels are sequential integers and start with 0. To make sure this is the case you should always use the np.unique() function on the target data as we did with the Iris example above.
End of explanation
"""
|
zerothi/sids
|
docs/tutorials/tutorial_es_2.ipynb
|
lgpl-3.0
|
graphene = geom.graphene()
H = Hamiltonian(graphene)
H.construct([(0.1, 1.44), (0, -2.7)])
"""
Explanation: Berry phase calculation for graphene
This tutorial will describe a complete walk-through of how to calculate the Berry phase for graphene.
Creating the geometry to investigate
Our system of interest will be the pristine graphene system with the on-site terms shifted by $\pm\delta$.
End of explanation
"""
H_bp = H.copy() # an exact copy
H_bp[0, 0] = 0.1
H_bp[1, 1] = -0.1
"""
Explanation: H now contains the pristine graphene tight-binding model. The anti-symmetric Hamiltonian is constructed like this:
End of explanation
"""
band = BandStructure(H, [[0, 0.5, 0], [1/3, 2/3, 0], [0.5, 0.5, 0]], 400, [r"$M$", r"$K$", r"$M'$"])
band.set_parent(H)
band_array = band.apply.array
bs = band_array.eigh()
band.set_parent(H_bp)
bp_bs = band_array.eigh()
lk, kt, kl = band.lineark(True)
plt.xticks(kt, kl)
plt.xlim(0, lk[-1])
plt.ylim([-.3, .3])
plt.ylabel('$E-E_F$ [eV]')
for bk in bs.T:
plt.plot(lk, bk)
for bk in bp_bs.T:
plt.plot(lk, bk)
"""
Explanation: Comparing electronic structures
Before proceeding to the Berry phase calculation, let's compare the band structure and DOS of the two models. The anti-symmetric Hamiltonian opens a gap around the Dirac cone; a zoom on the Dirac cone shows this.
End of explanation
"""
bz = MonkhorstPack(H, [41, 41, 1], displacement=[1/3, 2/3, 0], size=[.125, .125, 1])
bz_average = bz.apply.average # specify the Brillouin zone to perform an average
"""
Explanation: The gap opened is equal to the difference between the two on-site terms, in this case $0.2\mathrm{eV}$. Let's, for completeness' sake, calculate the DOS close to the Dirac point for the two systems. To resolve the gap, the distribution function (in this case a Gaussian) needs a small smearing value, to ensure the states are not spread so much that the gap is smeared out.
End of explanation
"""
plt.scatter(bz.k[:, 0], bz.k[:, 1], 2);
plt.xlabel(r'$k_x$ [$b_x$]');
plt.ylabel(r'$k_y$ [$b_y$]');
"""
Explanation: The above MonkhorstPack grid initialization is creating a Monkhorst-Pack grid centered on the $K$ point with a reduced Brillouin zone size of $1/8$th of the entire Brillouin zone. Essentially this only calculates the DOS in a small $k$-region around the $K$-point. Since in this case we know the electronic structure of our system we can neglect all contributions from $k$-space away from the $K$-point because we are only interested in energies close to the Dirac-point.
Here the sampled $k$-points are plotted. Note how they are concentrated around $[1/3, -1/3]$ which is the $K$-point.
End of explanation
"""
E = np.linspace(-0.5, 0.5, 1000)
dist = get_distribution('gaussian', 0.03)
bz.set_parent(H)
plt.plot(E, bz_average.DOS(E, distribution=dist), label='Graphene');
bz.set_parent(H_bp)
plt.plot(E, bz_average.DOS(E, distribution=dist), label='Graphene anti');
plt.legend()
plt.ylim([0, None])
plt.xlabel('$E - E_F$ [eV]');
plt.ylabel('DOS [1/eV]');
"""
Explanation: Before proceeding to the Berry phase calculation we calculate the DOS in an energy region around the Dirac-point to confirm the band-gap.
End of explanation
"""
# Number of discretizations
N = 50
# Circle radius in 1/Ang
kR = 0.01
# Normal vector (in units of reciprocal lattice vectors)
normal = [0, 0, 1]
# Origin (in units of reciprocal lattice vectors)
origin = [1/3, 2/3, 0]
circle = BrillouinZone.param_circle(H, N, kR, normal, origin)
plt.plot(circle.k[:, 0], circle.k[:, 1]);
plt.xlabel(r'$k_x$ [$b_x$]')
plt.ylabel(r'$k_y$ [$b_y$]')
plt.gca().set_aspect('equal');
"""
Explanation: Berry phase calculation
To calculate the Berry phase we are going to perform a discretized integration of the Bloch states on a closed loop around the $K$-point with a given radius. After having created the contour we plot the sampled $k$-points to check it.
End of explanation
"""
k = circle.tocartesian(circle.k)
plt.plot(k[:, 0], k[:, 1]);
plt.xlabel(r'$k_x$ [1/Ang]')
plt.ylabel(r'$k_y$ [1/Ang]')
plt.gca().set_aspect('equal');
"""
Explanation: The above plot shows a skewed circle because the $k$-points in the Brillouin zone object is stored in units of reciprocal lattice vectors. I.e. the circle is perfect in the reciprocal space. Note that the below Berry phase calculation ensures the loop is completed by also taking into account the first and last point.
To confirm that the circle is perfect in reciprocal space, we convert the $k$-points and plot again. Note also that the radius of the circle is $0.01\mathrm{Ang}^{-1}$.
End of explanation
"""
circle.set_parent(H)
print('Pristine graphene (0): {:.5f} rad'.format(electron.berry_phase(circle, sub=0)))
print('Pristine graphene (1): {:.5f} rad'.format(electron.berry_phase(circle, sub=1)))
print('Pristine graphene (:): {:.5f} rad'.format(electron.berry_phase(circle)))
circle.set_parent(H_bp)
print('Anti-symmetric graphene (0): {:.5f} rad'.format(electron.berry_phase(circle, sub=0)))
print('Anti-symmetric graphene (1): {:.5f} rad'.format(electron.berry_phase(circle, sub=1)))
print('Anti-symmetric graphene (:): {:.5f} rad'.format(electron.berry_phase(circle)))
"""
Explanation: Now we are ready to calculate the Berry phase. We calculate it for both graphene and the anti-symmetric graphene using only the first, second and both bands:
End of explanation
"""
kRs = np.linspace(0.001, 0.2, 70)
dk = 0.0001
bp = np.empty([2, len(kRs)])
for i, kR in enumerate(kRs):
circle = BrillouinZone.param_circle(H_bp, dk, kR, normal, origin)
bp[0, i] = electron.berry_phase(circle, sub=0)
circle_other = BrillouinZone.param_circle(utils.mathematics.fnorm(H_bp.rcell), dk, kR, normal, origin)
circle.k[:, :] = circle_other.k[:, :]
bp[1, i] = -electron.berry_phase(circle, sub=0)
plt.plot(kRs, bp[0, :], label=r'1/Ang');
plt.plot(kRs, bp[1, :], label=r'$b_i$');
plt.legend()
plt.xlabel(r'Integration radius [1/Ang]');
plt.ylabel(r'Berry phase [$\phi$]');
"""
Explanation: We now plot the Berry phase as a function of integration radius with a roughly constant discretization. We calculate it both along the circle that is perfect in Cartesian reciprocal space (labelled 1/Ang) and along one that is circular in units of the reciprocal lattice vectors (labelled $b_i$). This enables a comparison of the two integration paths.
End of explanation
"""
|
MilweeScience/Turner
|
Erosion_Turner.ipynb
|
mit
|
#importing what we'll need to use our data.
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt; plt.rcdefaults()
import matplotlib.pyplot as plt
"""
Explanation: Erosion: Here Today, Gone Tomorrow
This Jupyter Notebook will allow for the graphing of erosion data. We will use the <b>Discovery Science - Erosion: Here Today, Gone Tomorrow Module</b> to test soil erosion (Flash Player must be enabled on the computer to run the Module).
Step 1 - Creating a Checkpoint
Create a checkpoint by clicking <b>File</b> ==> <b>Save and Checkpoint</b>. If you make a major mistake, you can click <u>File</u> ==> <u>Revert to Checkpoint</u> to reset the Jupyter Notebook online on Binder.org.
<span style="color:magenta">Pre-Lab Questions</span>
1. What is erosion? How is it different from weathering?
2. How does deposition relate to erosion and weathering?
Using the Erosion Module
We will use the Discovery Science - Erosion: Here Today, gone Tomorrow Module to test soil erosion. The erosion module allows you to adjust and see how different variables impact soil erosion. Variables you can adjust include <u>soil treatment</u>, <u>amount of water</u>, and <u>incline</u>. To start testing, make sure that you click on <b>Investigate</b>, type in your name, and select <b>Level 1</b>.
<u>Part 1: Determining the Effect of Plants</u>
<span style="color:blue">Making a Bar Graph of the Data for Soil Erosion with No Treatment and Low Incline</span>
Importing the Data
The next block of code imports the Python libraries that we will need for Jupyter Notebooks to construct graphs of the data you collect. You can begin to execute the cells using <b> Shift + Enter </b> to import the libraries and continue.
End of explanation
"""
objects1 = ('Low Water', 'Medium Water', 'High Water')
y_pos = np.arange(len(objects1))
notreatment1 = [4,6,7]
plt.bar(y_pos, notreatment1, align='center', alpha=0.5, color ="b")
plt.xticks(y_pos, objects1)
plt.ylim(0,11)
plt.ylabel('Soil Runoff in Litres')
plt.title('Soil Erosion with No Soil Treatment and Low Incline')
plt.show()
"""
Explanation: This graph will display the results for soil erosion with no soil treatment and low incline, testing the effects of 'low', 'medium' and 'high' water. To run the block of code and all of the code blocks below, you will hit Shift + Enter.
End of explanation
"""
objects2 = ('Low Water', 'Medium Water', 'High Water')
y_pos = np.arange(len(objects2))
#Insert your data below. Make sure that the data is entered in the correct order.
#All the data is preset to 1 Litre. Order:
# Low Water
# Medium Water
# High Water
planterosion = [1,1,1]
plt.bar(y_pos, planterosion, align='center', alpha=0.5, color ="g")
plt.xticks(y_pos, objects2)
plt.ylim(0,11)
plt.ylabel('Soil Runoff in Litres')
plt.title('Soil Erosion with Plants and Low Incline')
plt.show()
"""
Explanation: <span style="color:magenta">Part 1 Questions - Low Incline with No Treatment</span>
3. What impact did increasing the water have on the soil erosion?
4. What was the erosion difference between the low water and high water?
5. How do you think adding plants will impact the amount of soil erosion?
<span style="color:green">Making a Bar Graph of the Data for Soil Erosion with Plants and Low Incline</span>
Using the "Eroison: Here Today, Gone Tomorrow" Module run tests for soil erosion for the <b>PLANTS</b> soil treatment with <b>LOW INCLINE</b> for 'low', 'medium' and 'high' water. Write down your results in a data table, then enter them in the code below where it says planterosion = [1,1,1].
End of explanation
"""
objects3 = ('Low Water - Low Incline', 'Low Water - Medium Incline', 'Low Water - Steep Incline', 'Medium Water - Low Incline', 'Medium Water - Medium Incline','Medium Water - Steep Incline','High Water - Low Incline', 'High Water - Medium Incline','High Water - Steep Incline')
y_pos = np.arange(len(objects3))
notreatment2 = [4,5,6,6,7,8,7,8,10]
plt.bar(y_pos, notreatment2, align='center', alpha=0.5, color=["c", "r", "y"])
plt.xticks(y_pos, objects3, rotation=90)
plt.ylim(0,11)
plt.ylabel('Soil Runoff in Litres')
plt.title('Soil Erosion with No Soil Treatment')
plt.show()
"""
Explanation: <span style="color:magenta">Part 1 Questions (cont) - Low Incline Erosion with Plants</span>
6. Comparing this graph to the first one, what impact did the plants have on erosion? Can you quantify this impact?
7. What effect do plants have on the amount of erosion? What application might this serve in the real-world?
<u>Part 2: Comparing Two Variables</u>
<span style="color:blue">Making a Bar Graph of the Data for Soil Erosion with No Treatment (Changing Water and Incline)</span>
This graph will display all results for soil erosion with no soil treatment including 'low', 'medium' and 'high' water and 'low', 'medium', and 'steep' incline.
End of explanation
"""
objects4 = ('Low Water - Low Incline', 'Low Water - Medium Incline', 'Low Water - Steep Incline', 'Medium Water - Low Incline', 'Medium Water - Medium Incline','Medium Water - Steep Incline','High Water - Low Incline', 'High Water - Medium Incline','High Water - Steep Incline')
y_pos = np.arange(len(objects4))
#Insert your data below in the [ ]. Make sure that the data is entered in the correct order.
#All the data is preset to 1 Litre. Order:
# Low Water - Low Incline
# Low Water - Medium Incline
# Low Water - Steep Incline
# Medium Water - Low Incline
# Medium Water - Medium Incline
# Medium Water - Steep Incline
# High Water - Low Incline
# High Water - Medium Incline
# High Water - Steep Incline
planterosion2 = [1,1,1,1,1,1,1,1,1]
plt.bar(y_pos, planterosion2, align='center', alpha=0.5, color=["g", "m", "k"])
plt.xticks(y_pos, objects4, rotation=90)
plt.ylim(0,11)
plt.ylabel('Soil Runoff in Litres')
plt.title('Soil Erosion with Plants')
plt.show()
"""
Explanation: <span style="color:magenta">Part 2 Questions - Comparing Two Variables (Water and Incline) with No Soil Treatment</span>
8. What impact do you think changing the incline will have on the amount of soil erosion? How might this change how landscapers and architects build on a slope?
9. What variable do you think has the biggest impact, water or incline? Explain.
10. Why did we change only one variable for each test run?
<span style="color:green">Making a Bar Graph of the Data for Soil Erosion with Plants (Changing Water and Incline)</span>
Using the "Eroison: Here Today, Gone Tomorrow" Module run tests for soil erosion for the <b>PLANTS</b> soil treatment with for 'low', 'medium' and 'high' water and 'low', 'medium', and 'steep' incline. Write down your results in a data table, then enter them in the code below where it says planterosion2 = [1,1,1,1,1,1,1,1,1].
End of explanation
"""
objectsb = ('Medium Water - Low Incline - Sand', 'Medium Water - Low Incline - Silt', 'Medium Water - Low Incline - Sand/Silt Mix','Medium Water - Medium Incline - Sand', 'Medium Water - Medium Incline - Silt', 'Medium Water - Medium Incline - Sand/Silt Mix', 'Medium Water - Steep Incline - Sand', 'Medium Water - Steep Incline - Silt', 'Medium Water - Steep Incline - Sand/Silt Mix')
y_pos = np.arange(len(objectsb))
#Insert your data below in the [ ]. Make sure that the data is entered in the correct order.
#All the data is preset to 1 Litre. Order:
# Medium Water - Low Incline - Sand
# Medium Water - Low Incline - Silt
# Medium Water - Low Incline - Sand/Silt Mix
# Medium Water - Medium Incline - Sand
# Medium Water - Medium Incline - Silt
# Medium Water - Medium Incline - Sand/Silt Mix
# Medium Water - Steep Incline - Sand
# Medium Water - Steep Incline - Silt
# Medium Water - Steep Incline - Sand/Silt Mix
level2 = [1,1,1,1,1,1,1,1,1]
plt.bar(y_pos, level2, align='center', alpha=0.5, color =["#C5B358", "#782F40", "k"])
plt.xticks(y_pos, objectsb, rotation=90)
plt.ylim(0,11)
plt.ylabel('Soil Runoff in Litres')
plt.title('Soil Erosion with Plants & Medium Water (Level 2)')
plt.show()
"""
Explanation: <span style="color:magenta">Part 2 Questions (cont) - Comparing Two Variables with Plants</span>
11. Which 3 test runs have the same amount of runoff?
12. Why do you believe that the amount of runoff is the same for those 3 test runs?
<u>Erosion Bonus Section - Changing the Type of Soil</u>
Open up Erosion: Here Today, Gone Tomorrow again in a new tab. This time click on <b>Level 2</b>, which now allows you to also adjust the type of soil. Run tests for soil erosion for the <u>PLANTS</u> soil treatment with <u>medium water</u> for 'low', 'medium' and 'steep' incline and 'Sand', 'Silt', and 'Sand/Silt Mix' soil types. Write down your results in a data table, then enter them in the code below where it says level2 = [1,1,1,1,1,1,1,1,1].
End of explanation
"""
|
csaladenes/blog
|
kendo romania/scripts/.ipynb_checkpoints/cleanerÜold-checkpoint.ipynb
|
mit
|
import pandas as pd, numpy as np, json
import members_loader, matches_loader, clubs_loader, point_utils, save_utils
"""
Explanation: Romania Kendo Stats
25 years of Kendo History in Romania, visualized
Data cleaning workbook
Created by Dénes Csala | 2019 | MIT License
For any improvement suggestions and spotted processing mistakes drop me a message on Facebook.
If you would like to have your country/club data visualized in a similar manner, or any other data visualization and analytics consultancy inquiries contact me at mail@csaladen.es
This workbook guides you through the data cleaning stage for the Romania Kendo Stats visualization. This is a multi-stage process, you will need access to the raw data (liaise with Secretary or other member in charge of data the Romanian Kendo Association), Python and Excel installed. Any Python packages will also be installed on the way, but we recommend using the Anaconda distribution of Python 3. If you would like to edit the visualization part, then you will need PowerBI Desktop.
The general structure of the repository is the following:
- /data
- /raw: this is where you place the downloaded data from the official data source, sorted by year and competition; keep only the files that contain relevant match data
- /ocr: this is where the data gets saved after an OCR has been performed - this is necessary for some older files in image format
- /manual: this is where manually extracted matches from old image files get placed - they should follow the 2018 CN format, i.e. all matches in one sheet
- /export: this is where we save the data formatted for loading into the viz
- /clean: this is where all the processed, cleaned data ends up - they should follow the 2018 CN format, i.e. all matches in one sheet
- /scripts: this is the main code repository for all data processing scripts
- /viz: this is where the visualization files get saved - they are created using PowerBI and load data from /data/clean
1. Load and clean members
This section reads and cleans the RKA members list and saves it as a baseline.
End of explanation
"""
members=members_loader.get_members('../data/manual/Evidenta membrilor.xlsm')
"""
Explanation: First, download members data (Evidenta membrilor.xlsx) from the official data source, and create a macro-enabled Excel file from the Google Sheet. Then write a simple macro to extract the cell comments from the Club column in order to get info about club Transfers. Follow the instructions here. Save the new file as Evidenta membrilor.xlsm in the /data/manual folder. Use the members_loader module to process this file.
End of explanation
"""
members.head(2)
members_clean=members_loader.cleaner(members).reset_index(drop=False)
members_clean.to_csv('../data/clean/members.csv')
"""
Explanation: Members are loaded but a bit messy.
End of explanation
"""
matches={i:{} for i in range(1993,2019)}
competitions={
2018:['CR','CN','SL'],
2017:['CR','CN','SL'],
2016:['CR','CN','SL'],
2015:['CR','CN','SL'],
2014:['CR','CN','SL'],
2013:['CR','CN','SL'],
2012:['CR','CN'],
2011:['CR','CN'],
2010:['CR','CN'],
2009:['CR','CN'],
1998:['CR'],
1997:['CR'],
1993:['CR']
}
"""
Explanation: 2. Load and clean matches
Matches are loaded from Excel sheets in the /data folder, organized by year and competition. We are always looking for match-list data: the cleaner and more concentrated, the better. Since this is not always possible, we have several demo import routines, stored in the matches_loader.py function library. Matches that are not available as text first need to be processed through OCR. Raw Excel data that can be processed right away is found in the /data/raw folder, while the OCR-processed files are in /data/ocr. A separate workbook, ocr.ipynb, walks you through the OCR process.
End of explanation
"""
for year in competitions:
for competition in competitions[year]:
matches[year][competition]=matches_loader.get_matches(year,competition)
"""
Explanation: 2.1. Load matches
End of explanation
"""
name_exceptions={'Atanasovski':'Atanasovski A. (MAC)',
'Dobrovicescu (SON)':'Dobrovicescu T. (SON)',
'Ianăș':'Ianăș F.',
'Crăciun (Tamang) Sujata':'Crăciun S.',
'Abe (Carțiș) Emilia':'Abe E.',
'Dinu (Ioniță) Claudia-Andreea':'Dinu A.',
'Mureșan (Egri) Melinda':'Mureșan M.',
'Grădișteanu (Gușu) Rebeca':'Grădișteanu R.',
'Józsa (Gușu) Rodiana':'Józsa R.',
'Arabadjiyski': 'Arabadjiyski A.',
'Dudaș Francisc Andrei':'Dudaș F.',
'Dudaș Francisc':'Dudaș F.',
'Mandia':'Mandia F.',
'Stanev':'Stanev A.',
'Mochalov':'Mochalov O.',
'Sozzi':'Sozzi A.',
'Crăciunel':'Crăciunel I.',
'Craciunel':'Crăciunel I.',
'Sagaev':'Sagaev L.',
'Buzás':'Búzás C.',
'Csala':'Csala T.',
'Dimitrov':'Dimitrov M.',
'Józsa':'Józsa L.',
'Creangă':'Creangă A.',
'Duțescu':'Duțescu M.',
'Furtună':'Furtună G.',
'Gârbea':'Gârbea I.',
'Stupu':'Stupu I.',
'Mahika-Voiconi':'Mahika-Voiconi S.',
'Mahika':'Mahika-Voiconi S.',
'Stanciu':'Stanciu F.',
'Vrânceanu':'Vrânceanu R.',
'Wolfs':'Wolfs J.',
'Ducarme':'Ducarme A.',
'Sbârcea':'Sbârcea B.',
'Mocian':'Mocian A.',
'Hatvani':'Hatvani L.',
'Dusan':'Dusan N.',
'Borota':'Borota V.',
'Tsushima':'Tsushima K.',
'Tráser':'Tráser T.',
'Colțea':'Colțea A.',
'Brîcov':'Brîcov A.',
'Yamamoto':'Yamamoto M.',
'Crăciun':'Crăciun D.'}
"""
Explanation: 2.2. Standardize names
Names in name_exceptions get replaced with their right hand side values before processing.
End of explanation
"""
name_equals={'Chirea M.':'Chirea A.',
'Ghinet C.':'Ghineț C.',
'Anghelescu A.':'Anghelescu M.',
'Domnița M.':'Domniță M.',
'Bejgu N.':'Beygu N.',
'Canceu A.':'Canceu Ad.',
'Dinu C.':'Dinu A.',
'Grapa D.':'Grapă D.',
'Cristea C.':'Cristea Că.',
'Cismas O.':'Cismaș O.',
'Garbea I.':'Gârbea I.',
'Vitali O.':'Oncea V.',
'Ah-hu W.':'Ah-hu S.',
'Horvát M.':'Horváth M.',
'Ionita A.':'Ioniță A.',
'Medvedschi I.':'Medvețchi I.',
'Mahika S.':'Mahika-Voiconi S.',
'Mate L.':'Máté L.',
'Hentea L.':'Hentea A.',
'Stupu I.':'Stupu A.',
'Ah-Hu S.':'Ah-hu S.',
'Alexa I.':'Alexa A.',
'Albert V.':'Albert J.',
'Angelescu M.':'Angelescu M.',
'Apostu D.':'Apostu T.',
'Brâcov A.':'Brîcov A.',
'Zaporojan R.':'Zaporojan O.',
'Vasile C.':'Vasile I.',
'Dițu I.':'Dițu A.',
'Tudor-Duicu C.':'Tudor D.',
'Sandu M.':'Sandu Mar.',
'Radulescu A.':'Rădulescu An.',
'Péter C.':'Péter Cso.',
'Movatz E.':'Movatz V.',
'Molinger B.':'Molinger P.',
'Mitelea C.':'Mițelea C.',
'Macavei I.':'Macaveiu A.',
'Macavei A.' : 'Macaveiu A.',
'Macaveiu I.' : 'Macaveiu A.',
'Luca T.':'Luca Tr.',
'Leca L.':'Leca F.',
'Gutu E.':'Guțu E.',
'Angelescu A.':'Angelescu M.',
'Mehelean L.':'Mahalean L.',
'Catoriu D.':'Cantoriu D.',
'Călina A.':'Călina C.',
'Ștefu I.' : 'Ștefu L.',
'Țarălungă A.' : 'Țarălungă D.',
'Buzás C.':'Búzás C.',
'Korenshi E.':'Korenschi E.',
'Pleșa R.':'Pleșea R.',
'Galos A.':'Galoș A.',
'Győrfi G.':'Györfi G.',
'Győrfi S.':'Györfi S.',
'Ghineț G.':'Ghineț C.',
'Hostina E.':'Hoștină E.',
'Hostină E.':'Hoștină E.',
'Ianăs F.':'Ianăș F.',
'Ianas F.':'Ianăș F.',
'Tamang S.':'Crăciun S.',
'Taralunga D.':'Țarălungă D.',
'Lacatus M.':'Lăcătuș M.',
'Máthé L.':'Máté L.',
'Burinaru A.':'Burinaru Al.',
'Nastase M.':'Năstase E.',
'Oprisan A.':'Oprișan A.',
'Pârlea A.':'Pîrlea A.',
'Parlea A.':'Pîrlea A.',
'Sabau D.':'Sabău D.',
'Spriu C.':'Spiru C.',
'Crețiu T.':'Crețiu-Codreanu T.',
'Crețiu M.':'Crețiu-Codreanu M.',
'Bíró S.':'Biró S.',
'Oprișan B.':'Oprișan A.',
'Székely J.':'Székely P.',
'Bărbulescu M.' : 'Bărbulescu E.',
'Bejenariu G.' : 'Bejenaru G.',
'Bojan V.' : 'Bojan Vl.',
'Moise A.' : 'Moise Ad.',
'Măgirdicean R.' : 'Magirdicean Ră.',
'Pall D.':'Páll D.',
'Stănculascu C.':'Stănculescu C.',
'Vrânceanu M.': 'Vrânceanu L.',
'Georgescu A.':'Georgescu An.',
'Wasicek V.':'Wasicheck W.',
'Wasicsec W.':'Wasicheck W.',
'Wasichek W.' : 'Wasicheck W.',
'Wasicsek W.':'Wasicheck W.',
'Zolfoghari A.':'Zolfaghari A.'}
"""
Explanation: Names in name_equals get replaced with their right hand side values after processing.
End of explanation
"""
name_doubles={
'Cristea Cristina':'Cristea Cr.',
'Cristea Călin-Ștefan':'Cristea Că.',
'Sandu Marius-Cristian':'Sandu Mar.',
'Sandu Matei-Serban':'Sandu Mat.',
'Sandu Matei':'Sandu Mat.',
'Georgescu Andrei':'Georgescu An.',
'Georgescu Alexandra':'Georgescu Al.',
'Péter Csongor':'Péter Cso.',
'Péter Csanád':'Péter Csa.',
'Luca Mihnea':'Luca Mihn.',
'Luca Mihai-Cătălin':'Luca Miha.',
'Luca':'Luca Miha.',
'Luca M':'Luca Miha.',
'Luca M.':'Luca Miha.',
'Luca Mihai':'Luca Miha.',
'Luca Traian-Dan':'Luca Tr.',
'Luca Tudor':'Luca Tu.',
'Canceu Anamaria':'Canceu An.',
'Canceu Adriana-Maria':'Canceu Ad.',
'Cioată Daniel-Mihai':'Cioată M.',
'Cioată Dragoș':'Cioată D.',
'Burinaru Alexandra':'Burinaru Al.',
'Burinaru Andreea':'Burinaru An.',
'Kovács Andrei':'Kovács An.',
'Kovács Alexandru':'Kovács Al.',
'Cristea Adrian':'Cristea Ad.',
'Cristea Andrei':'Cristea An.',
'Cristea A.':'Cristea An.',
'Ungureanu Nicolae Marius':'Ungureanu M.',
'Ungureanu Nicoleta':'Ungureanu N.',
'Vincze Vlad':'Vincze Vl.',
'Vincze Valentina':'Vincze Va.',
'Bojan Vladimir':'Bojan Vl.',
'Bojan Voicu':'Bojan Vo.',
'Crețiu Codreanu Matei':'Crețiu-Codreanu M.',
'Crețiu Codreanu Tudor':'Crețiu-Codreanu T.',
'Pop Mugurel Voicu':'Pop-Mugurel V.',
'Pop Mihai':'Pop M.',
'Moise Alexandru':'Moise Al.',
'Moise Adrian':'Moise Ad.',
'Rădulescu Andrei-Savin':'Rădulescu An.',
'Rădulescu Adrian':'Rădulescu Ad.',
'Magirdicean Romeo':'Magirdicean Ro.',
'Magirdicean Răzvan Ionuț':'Magirdicean Ră.'}
"""
Explanation: Names in name_doubles handle situations where the default name abbreviation would lead to duplicate IDs.
End of explanation
"""
letter_norm={'ţ':'ț','ş':'ș','Ş':'Ș'}
def name_cleaner(name):
name=str(name)
if name in name_doubles:
return name_doubles[name]
else:
for letter in letter_norm:
name=name.replace(letter,letter_norm[letter])
if name in name_exceptions:
name=name_exceptions[name]
nc=name.replace(' ',' ').split('(')
rname=nc[0].strip()
rnames=rname.split(' ')
sname=rnames[0]+' '+rnames[1][0]+'.'
if sname in name_equals:
sname=name_equals[sname]
if sname in name_doubles:
print(name,sname)
return sname
"""
Explanation: Normalize Romanian characters and define the name cleaner function that produces Name IDs. A Name ID is a unique competitor name of the form: Surname, first letter of the given name. If the first letter leads to a non-unique ID, a second letter is taken, and so forth, until a unique ID is found. It gets constructed as follows:
1. If name in doubles return the solution directly
2. Normalize characters
3. If name is in exceptions, clean
4. Replace any double spaces, then split at ( (to split away club, if embedded in the name)
5. Split into Surname and Name, store in rnames
6. Store Surname N. in sname
7. If sname is in equals, clean
8. Return sname
End of explanation
"""
redflags_names=['-','—','—',np.nan,'. ()','— ','- -.','- -. (-)','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','R','S',
'Kashi','Sankon','București','Victorii:','Sakura','Taiken','Ikada','Sonkei','CRK','Museido',
'Ichimon','Bushi Tokukai 1','Competitori – Shiai-sha','Echipa - roşu','Numele şi prenumele',
'Victorii:','Victorii: 0','Victorii: 1','Victorii: 2','Victorii: 3','Victorii: 4',
'Victorii: 5','?','Kyobukan','2/5','2/6','3/8','Finala','Kyobukan (0/0/0)','―',
'(clasament final după meci de baraj)','CRK (Bucuresti)','Kaybukan','Isshin (Cluj)',
'Ikada (Bucureşti)','Kyobukan (Braşov)','Puncte:','KASHI','Budoshin','Isshin',
'— (—)','4. B.','4. Baraj: Stupu M - Hostina','4. Baraj: Moise KM - Korenschi M',
'Bushi Tokukai (2/8/17)','CRK 2 (1/6/14)', 'CRK 2','CRK 1','Loc I.:','Loc',
'Bushi Tokukai 2 (M Ciuc)','Echipa suport']
redflags_names2=['Bushi Tokukai','Eliminatoriu','finala','Finala','Fianala','Ikada','Ichimon','Pool',
'Locul ','Lotul ','Loc ','Grupa ','Isshin','Meciul ','Victorii:','L1','1','2','3','4','5','6','7','8','9','0']
"""
Explanation: Names equalling any string in redflags_names get thrown out of the final dataset.
Names containing any string in redflags_names2 get thrown out of the final dataset.
End of explanation
"""
def name_ok(name):
name=str(name)
if name=='nan': return False
if name not in redflags_names:
if np.array([i not in name for i in redflags_names2]).all():
return True
return False
"""
Explanation: Check that a name is not in the redflags lists defined above; redflagged entries get ignored.
End of explanation
"""
all_players={}
all_players_r={}
all_players_unsorted=set()
for year in matches:
for competition in matches[year]:
for match in matches[year][competition]:
for color in ['aka','shiro']:
name=match[color]['name']
all_players_unsorted.add(name)
if name_ok(name):
name=name_cleaner(name)
rname=match[color]['name']
if rname not in all_players_r:all_players_r[rname]=name
if name not in all_players: all_players[name]={}
if year not in all_players[name]:all_players[name][year]={'names':set()}
all_players[name][year]['names'].add(rname)
if 'shinpan' in match:
for color in ['fukushin1','shushin','fukushin2']:
aka=match['aka']['name']
shiro=match['shiro']['name']
if (name_ok(aka)) and\
(name_ok(shiro)) and\
(name_cleaner(aka) in all_players) and\
(name_cleaner(shiro) in all_players):
rname=match['shinpan'][color]
all_players_unsorted.add(rname)
if name_ok(rname):
name=name_cleaner(rname)
if rname not in all_players_r:all_players_r[rname]=name
if name not in all_players: all_players[name]={}
if year not in all_players[name]:all_players[name][year]={'names':set()}
all_players[name][year]['names'].add(rname)
"""
Explanation: Process all names for standardization. Create 3 variables:
1. all_players: cleaned name -> {year -> set of raw name forms seen for that player in that year}
2. all_players_r: raw name -> cleaned name (the reverse lookup)
3. all_players_unsorted: unique set of all raw names processed
Process both competitor and shinpan names.
End of explanation
"""
name_linker={}
for i in members_clean.index:
name=members_clean.loc[i]['name']
try:
cname=name_cleaner(name)
except:
print(name)
if cname not in name_linker:name_linker[cname]=set()
name_linker[cname].add(name)
"""
Explanation: Link procesed to names in members. The name_linker dictionary contains Name IDs (short names) as keys and sets of long names as values. Ideally, this set should contain only one element, so that the mapping is unique.
End of explanation
"""
names_abbr={}
for name in name_linker:
if len(name_linker[name])>1:
#only for dev to create exceptions for duplicate person names.
print(name,name_linker[name])
for i in name_linker[name]:
names_abbr[i]=name
"""
Explanation: Do the opposite mapping in names_abbr: long->short. Create exceptions for duplicate names.
End of explanation
"""
names_abbr_list=[]
name_abbr2long={}
name_abbr2club={}
for i in members_clean.index:
name=members_clean.loc[i]['name']
club=members_clean.loc[i]['club']
year=members_clean.loc[i]['year']
names_abbr_list.append(names_abbr[name])
name_abbr2long[names_abbr[name]]=name
if names_abbr[name] not in name_abbr2club:name_abbr2club[names_abbr[name]]={}
if year not in name_abbr2club[names_abbr[name]]:
name_abbr2club[names_abbr[name]][year]=club
"""
Explanation: Save club mappings by short name, by year.
End of explanation
"""
members_clean['name_abbr']=names_abbr_list
"""
Explanation: Add short names to members_clean.
End of explanation
"""
for name in all_players:
if name not in name_abbr2long:
#infer using longest available name
names={len(j):j for i in all_players[name] for j in all_players[name][i]['names']}
if len(names)>0:
inferred_name=names[max(names.keys())]
if '(' in inferred_name:
inferred_name=inferred_name[:inferred_name.find('(')-1]
name_abbr2long[name]=inferred_name
"""
Explanation: Some names appear only in short form, so we need to add them manually to the long-name list. We parse through all forms in which the name appears and choose the longest; we call this the inferred name.
End of explanation
"""
def levenshteinDistance(s1, s2):
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
return distances[-1]
nkeys=np.sort(list(name_abbr2long.keys()))
for ii in range(len(name_abbr2long)):
i=nkeys[ii]
for jj in range(ii):
j=nkeys[jj]
if levenshteinDistance(name_abbr2long[i],name_abbr2long[j])<4:
print(name_abbr2long[i],':',name_abbr2long[j],' - ',i,':',j)
nkeys=np.sort(list(name_abbr2long.keys()))
for ii in range(len(name_abbr2long)):
i=nkeys[ii]
for jj in range(ii):
j=nkeys[jj]
if levenshteinDistance(i,j)<3:
print(i,':',j,' - ',name_abbr2long[i],':',name_abbr2long[j])
"""
Explanation: Infer potential duplicates by flagging pairs of names with a small Levenshtein (edit) distance; candidate pairs are printed for manual review.
End of explanation
"""
redflags_clubs=['','N/A','RO1','RO2']
club_equals=clubs_loader.club_equals
"""
Explanation: 2.3. Infer clubs
Infer the club from the competitor's name when the club appears as part of the name in the competition records. Club names in redflags_clubs get ignored. Clubs in club_equals get replaced after processing. The convention is a 3-letter all-caps club code for Romanian clubs, and a 3-letter club code followed by a / and a two-letter country code for foreign clubs (see the illustrative check below).
End of explanation
"""
for name in all_players:
#if we dont already know the club for this year from the members register
if name not in name_abbr2club:
for year in all_players[name]:
for name_form in all_players[name][year]['names']:
if '(' in name_form:
club=name_form.split('(')[1].strip()[:-1]
if club not in redflags_clubs:
if name not in name_abbr2club:name_abbr2club[name]={}
name_abbr2club[name][year]=club
else:
for year in all_players[name]:
#else if no club info for particular year
if year not in name_abbr2club[name]:
for name_form in all_players[name][year]['names']:
if '(' in name_form:
club=name_form.split('(')[1].strip()[:-1]
if club not in redflags_clubs:
name_abbr2club[name][year]=club
"""
Explanation: Attach clubs to the all_players entries whose competition name data contains a club that is not already known from the members register.
End of explanation
"""
for name in name_abbr2club:
for year in name_abbr2club[name]:
if name_abbr2club[name][year] in club_equals:
name_abbr2club[name][year]=club_equals[name_abbr2club[name][year]]
for name in name_abbr2long:
name_abbr2long[name]=name_abbr2long[name].replace(' ',' ').strip()
"""
Explanation: Normalize club names and long names.
End of explanation
"""
manual_club_needed=set()
for name in all_players:
if name in name_abbr2club:
years=np.sort(list(all_players[name].keys()))
minyear1=min(years)
maxyear1=max(years)
minyear2=min(name_abbr2club[name].keys())
        maxyear2=max(name_abbr2club[name].keys())
if len(years)>1:
for year in range(min(minyear1,minyear2),max(maxyear1,maxyear2)+1):
if year not in name_abbr2club[name]:
#get club from previous year
for y in range(years[0],year):
if y in name_abbr2club[name]:
name_abbr2club[name][year]=str(name_abbr2club[name][y])
break
if year not in name_abbr2club[name]:
#if still not found, get club from next year
for y in np.arange(years[-1],year,-1):
if y in name_abbr2club[name]:
name_abbr2club[name][year]=str(name_abbr2club[name][y])
break
if year not in name_abbr2club[name]:
#if still not found, get first known year
if year<minyear2:
name_abbr2club[name][year]=str(name_abbr2club[name][minyear2])
else:
name_abbr2club[name][year]=str(name_abbr2club[name][maxyear2])
else:
manual_club_needed.add(name)
"""
Explanation: If club still not found, fill the gaps between years. Forward fill first, then backward fill, if necessary.
End of explanation
"""
manual_name_needed=set()
#check if we dont have first name information, then flag for manual additions
for name in name_abbr2long:
names=name_abbr2long[name].split(' ')
if len(names)<2:
manual_name_needed.add(name)
elif len(names[1])<3:
manual_name_needed.add(name)
manual_data_override=pd.read_excel('../data/manual/members_manual.xlsx').set_index('name')
common_manual=set(manual_club_needed).intersection(set(manual_data_override.index))
manual_data_override=manual_data_override.loc[common_manual]
manual_data_needed=[]
for i in manual_name_needed.union(manual_club_needed):
if i not in list(manual_data_override.index):
dummy={'name':i,'long_name':'','club':''}
if i in name_abbr2club:
            dummy['club']=name_abbr2club[i][max(list(name_abbr2club[i].keys()))]
if i in manual_club_needed:
if i in name_abbr2long:
dummy['long_name']=name_abbr2long[i]
manual_data_needed.append(dummy)
df=pd.DataFrame(manual_data_needed).set_index('name')
df=pd.concat([manual_data_override,df]).drop_duplicates().sort_index()
df.to_excel('../data/manual/members_manual.xlsx')
"""
Explanation: We have extracted what was possible from the data. Now we save the short-name to long-name and club mappings (by year) and then edit this file manually, if necessary.
2.4. Manual club and long name overrides
End of explanation
"""
for i in df['long_name'].replace('',np.nan).dropna().index:
name_abbr2long[i]=df.loc[i]['long_name']
all_players_r[name_abbr2long[i]]=i
manual_club_needed=set()
for name in all_players:
years=np.sort(list(all_players[name].keys()))
minyear=min(years)
maxyear=max(years)
for year in range(minyear,maxyear+1):
if name not in name_abbr2club:name_abbr2club[name]={}
if year not in name_abbr2club[name]:
if name in df['club'].replace('',np.nan).dropna().index:
name_abbr2club[name][year]=df.loc[name]['club']
else:
name_abbr2club[name][year]='XXX'
"""
Explanation: Extend with manual data
End of explanation
"""
unregistered_members=[]
for name in all_players:
if name not in set(members_clean['name_abbr'].values):
years=np.sort(list(name_abbr2club[name].keys()))
for year in range(min(years),max(years)+1):
if year in all_players[name]:
iyear=year
else:
iyear=max(years)
club,country=clubs_loader.club_cleaner(name_abbr2club[name][year])
if country=='RO':
activ='Active'
dan=''#dan=0
else:
activ='Abroad'
dan=''
unregistered_members.append({'name':name_abbr2long[name],'name_abbr':name,
'club':club,'active':activ,'year':year,'dan':dan,'country':country,'source':'matches'})
members_clean['country']='RO'
members_clean['source']='member list'
members_updated=pd.concat([members_clean,pd.DataFrame(unregistered_members)]).reset_index(drop=True)
"""
Explanation: Update and overwrite with club existence data
3. Update members
Extend members data with data mined from matches
Extend members with unregistered members, who are probably inactive now or from abroad and appear only in the years when they competed. We register them as known to be active only for those years. This is in contrast with the Inactive members from the registry, for whom we know when they went inactive.
End of explanation
"""
members_mu_dan_extensions=[]
members_by_name=members_updated.set_index(['name_abbr'])
for year in matches:
members_by_year=members_updated.set_index(['year']).loc[year]
for competition in matches[year]:
print(year,competition)
for k in matches[year][competition]:
aka=k['aka']['name']
shiro=k['shiro']['name']
if (name_ok(aka)) and\
(name_ok(shiro)) and\
(name_cleaner(aka) in all_players) and\
(name_cleaner(shiro) in all_players):
for a in ['aka','shiro']:
for h in k[a]:
if h=='name':
name=k[a][h]
rname=all_players_r[name]
if rname in list(members_by_name.index):
if rname not in members_by_year['name_abbr'].values:
dummy=members_by_name.loc[[rname]]
minyear=min(dummy['year'])
maxyear=max(dummy['year'])
if year>maxyear:
dummy=dummy[dummy['year']==maxyear]
yeardiff=min(dummy['year'])-year
else:
dummy=dummy[dummy['year']==minyear]
yeardiff=year-max(dummy['year'])
dummy=dummy.reset_index()
dummy['year']=year
dummy['dan']=0
dummy['age']=dummy['age']+yeardiff
dummy['source']='matches, mu dan'
members_mu_dan_extensions.append(dummy)
#if only appears in competition in one year, then not in members table
else:
print(rname,year)
#fix in unregistered_members
"""
Explanation: Extend members with 0 dan (mu dan) rows down to their starting year: competitors who appear in matches in a year missing from the registry get a copied row for that year, with dan set to 0 and age adjusted.
End of explanation
"""
members_mu_dan_extensions=pd.concat(members_mu_dan_extensions)
members_updated=pd.concat([members_updated,members_mu_dan_extensions]).reset_index(drop=True)
"""
Explanation: Update members
End of explanation
"""
clubs=[]
pclubs=[]
countries=[]
for i in members_updated.index:
club=members_updated.loc[i]['club']
country=members_updated.loc[i]['country']
year=members_updated.loc[i]['year']
club,country=clubs_loader.club_cleaner(club,country)
club,pclub=clubs_loader.club_year(club,country,year)
clubs.append(club)
pclubs.append(pclub)
countries.append(country)
members_updated['club']=clubs
members_updated['pretty_club']=pclubs
members_updated['country']=countries
"""
Explanation: Prettify club names and IDs
End of explanation
"""
manual_mf_data_override=pd.read_excel('../data/manual/members_mf_manual.xlsx')
manual_mf_data_needed=members_updated[(members_updated['gen']!='M')&(members_updated['gen']!='F')][['name_abbr','name']]\
.drop_duplicates()
df=manual_mf_data_needed#.merge(manual_mf_data_override[['name_abbr','gen']],'outer').drop_duplicates()
df.to_excel('../data/manual/members_mf_manual.xlsx')
"""
Explanation: Fix unknown genders
End of explanation
"""
members_updated=members_updated.reset_index(drop=True).drop_duplicates()
gens=[]
for i in members_updated.index:
name=members_updated.loc[i]['name_abbr']
if name in list(df.index):
gens.append(df.loc[name])
else:
gens.append(members_updated.loc[i]['gen'])
members_updated['gen']=gens
"""
Explanation: Update members with manual gender data.
End of explanation
"""
members_updated.to_csv('../data/export/members.csv')
clubs_updated=members_updated.groupby(['club','country','pretty_club','year'])[['name_abbr']].count()
clubs_updated=clubs_updated.reset_index().set_index('club').join(clubs_loader.club_year_df['Oraș'])
clubs_updated.to_csv('../data/export/clubs.csv')
"""
Explanation: Save to /data/export.
End of explanation
"""
master_matches=[]
for year in matches:
members_by_year=members_updated.set_index(['year']).loc[year].drop_duplicates()
for competition in matches[year]:
print(year,competition)
for k in matches[year][competition]:
good=True
match={'year':year,'competition':competition}
match['match_category'],match['match_teams'],match['match_phase']=point_utils.match_cleaner(year,k['match_type'])
if 'shinpan' in k:
for color in ['fukushin1','shushin','fukushin2']:
if color in k['shinpan']:
if k['shinpan'][color] in all_players_r:
#normalize shinpan names
match[color]=name_abbr2long[all_players_r[k['shinpan'][color]]]
aka=k['aka']['name']
shiro=k['shiro']['name']
if (name_ok(aka)) and\
(name_ok(shiro)) and\
(name_cleaner(aka) in all_players) and\
(name_cleaner(shiro) in all_players):
for a in ['aka','shiro']:
points=''
for h in k[a]:
if h=='name':
name=k[a][h]
#normalize competitor names
rname=all_players_r[name]
df=members_by_year[members_by_year['name_abbr']==rname]
match[a+' name']=name_abbr2long[rname]
else:
point=k[a][h]
if str(point)=='nan': point=''
points=points+point
good=point_utils.point_redflags(points)
if good:
match[a+' point1'],match[a+' point2'],match[a+' points'],\
match[a+' hansoku'],match['encho']=point_utils.points_cleaner(points)
else:
good=False
if good:
if 'outcome' in k:
match['encho']=point_utils.outcome_cleaner(k['outcome'])
else:
match['encho']=False
match['winner'],match['difference']=point_utils.outcome_from_points(match['aka points'],match['shiro points'])
master_matches.append(match)
"""
Explanation: 4. Update matches
Update and save cleaned match data
End of explanation
"""
data=pd.DataFrame(master_matches).reset_index(drop=True)
save_utils.save(data)
"""
Explanation: Clean up and save matches for display
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/nuist/cmip6/models/sandbox-3/atmoschem.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nuist', 'sandbox-3', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: NUIST
Source ID: SANDBOX-3
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:34
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is coupled with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of atmospheric chemistry code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
*Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry gas phase chemistry
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
abhay1/tf_rundown
|
notebooks/Introduction.ipynb
|
mit
|
import tensorflow as tf
# Create a tensorflow constant
hello = tf.constant("Hello World!")
# Print this variable as is
print(hello)
"""
Explanation: Introduction
Hello world!
tf.constant: a TensorFlow constant! It can be a string, a number, or a tensor. Once the value of a constant is set, it can never change!
End of explanation
"""
# Create a new session
sess = tf.Session()
# Print the constant
print("Printing using Session.run()")
print(sess.run(hello))
# Also
print("Printing using eval() function")
print(hello.eval(session=sess))
"""
Explanation: Oops! That is not what we wanted! This is because the variable hello hasn't been evaluated yet. Tensorflow needs a session to run the graph in!
End of explanation
"""
# run addition and multiplication operations
a = tf.constant(25, tf.float32)
b = tf.constant(5, tf.float32)
with tf.Session() as sess:
print("A = %f"%sess.run(a))
print("B = %f"%sess.run(b))
print("A + B = %f"%sess.run(a+b))
print("A * B = %f"%sess.run(a*b))
print("A / B = %f"%sess.run(a/b))
print("A + B using tf add = %f"%sess.run(tf.add(a, b)))
print("A * B using tf multiply = %f"%sess.run(tf.multiply(a, b)))
"""
Explanation: Tensorflow math operations!
End of explanation
"""
# Run addition and multiplication with placeholders
c = tf.placeholder(tf.float32, shape=())
d = tf.placeholder(tf.float32, shape=())
sum = tf.add(c, d)
prod = tf.multiply(c, d)
with tf.Session() as sess:
print("Operations by feeding values")
print("C = %f"%sess.run(c, feed_dict={c: 4}))
print("D = %f"%sess.run(d, feed_dict={d: 6}))
print("Sum = %f"%sess.run(sum, feed_dict={c: 4, d: 6}))
print("Prod = %f"%sess.run(prod, feed_dict={c: 4, d: 6}))
# Matrix operations with placeholders
import numpy as np
mat1 = tf.placeholder(tf.float32, shape=(2,2))
mat2 = tf.placeholder(tf.float32, shape=(2,1))
matmul = tf.matmul(mat1, mat2)
with tf.Session() as sess:
print("Matrix multiplication using python lists as feed dict values")
print(sess.run(matmul, feed_dict={ mat1: [[1,2],[2,1]], mat2: [[1],[2]]}))
print("Matrix multiplication using numpyarrays as feed dict values")
print(sess.run(matmul, feed_dict={ mat1: np.array([[1,2],[2,1]]), mat2: np.array([[1],[2]])}))
"""
Explanation: A TensorFlow placeholder is a promise to provide a value later (supplied/fed at execution time). For placeholders, an optional argument shape can be used to make sure the input dimensions match the required tensor dimensions.
If this is missing or None (default), then the placeholder can accept any shape (a small illustration follows below).
End of explanation
"""
|
hackthemarket/pystrat
|
DEN2_features.ipynb
|
gpl-3.0
|
# imports
import collections
import pandas as pd
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
import sys
import tensorflow as tf
import time
import os
import os.path
import pickle
import logging as log
log.basicConfig(level=log.DEBUG)
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
import sim
# can we still sim?
_,B = sim.sim(P)
# plot NAV
B.NAV.plot()
P.head()
"""
Explanation: Feature R&D
Last time, we loaded data, saw that it was basically usable for simulation and then stored it.
Now, we look a bit closer at the data and look at how to best use it given the off-the-shelf tensor flow tools we'll try to apply.
End of explanation
"""
def prep_ml( u, show_plots=False ) :
# given universe, prep for ML: scale, center & generate moments
t0 = time.time()
log.info('scaling & centering...')
u.reset_index( inplace=True)
u.sort_values(['Sym','Date'],inplace=True)
u.Date = pd.to_datetime(u.Date)
u.set_index('Date',inplace=True)
# scale & center prices & volume
raw_scaled = u.groupby('Sym').transform( lambda x : (x - x.mean())/x.std())
u = pd.concat([ u.Sym, raw_scaled], axis=1)
# graphical sanity check
if (show_plots):
log.info('Plotting scaled & centered prices')
fig, ax = plt.subplots()
u.groupby('Sym')['Close'].plot(ax=ax)
log.info('completed scaling & centering in %d...',(time.time()-t0))
return u
Z = prep_ml(P,show_plots=True)
Z.head()
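# Added illustration on toy data (not part of the original pipeline): the same
# per-symbol groupby/transform z-scoring that prep_ml applies, shown on a tiny frame.
toy = pd.DataFrame({'Sym': ['A', 'A', 'A', 'B', 'B', 'B'],
                    'Close': [10.0, 11.0, 12.0, 100.0, 110.0, 120.0]})
toy_scaled = toy.groupby('Sym').transform(lambda x: (x - x.mean()) / x.std())
print(pd.concat([toy.Sym, toy_scaled], axis=1))  # each symbol now has mean 0, std 1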
"""
Explanation: Looks like it did yesterday.
OK, now we want to use some portion of this data to train simple ML models.
Let's define and run a function to normalize the data.
End of explanation
"""
print(Z.shape)
Z.drop(['Multiplier','Expiry','Strike', 'Fwd_Open', 'Fwd_COReturn'],axis=1, inplace=True)
print(Z.shape)
Z.head()
# let's get rid of NaNs from rolling windows
K = Z.dropna()
K.shape
K.head()
"""
Explanation: Let's clean out the uninteresting columns
End of explanation
"""
# first a quick look at the distribution of returns.
K.hist('Return',bins=100)
# now, where do we partition our classes?
q = K.Return.quantile([.333333,.666666]).values
print(q)
# let's add-in a 1-day Garman-Klass vol
#K['GK'] = np.sqrt(np.abs( 252 * ( (1/2) * (np.log( K.High/ K.Low) )**2 - (2 * np.log(2) - 1 ) * (np.log(K.Close/K.Open))**2 )))
K['SD'] = np.abs(K.SD)
#K['VOLARATIO'] = np.divide( K.GK , K.SD )
# let's classify date by doy and day of week
K['DOY'] = K.index.dayofyear
K['DOW'] = K.index.dayofweek
# let's encode the symbols
K['FSYM'], _ = pd.factorize(K.Sym)
# let's represent vol as ratio with ADV
K['VARATIO'] = np.abs(np.divide( K.Volume , K.ADV))
# let's create column of labels based on these values
K['Label'] = np.where( K.Fwd_Return <= q[0], 0,
np.where( K.Fwd_Return <= q[1], 1, 2))
# let's make sure labels look reasonable
print(K.groupby('Label').size())
# Now that we have labels, let's get rid of fwd-looking values
K.drop(['Fwd_Return', 'Fwd_Close'],axis=1, inplace=True)
K.head()
# do we hava NaNs in our data?
K[K.isnull().any(axis=1)]
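# Added sketch of the Garman-Klass estimator discussed in the explanation below; it is
# illustrative only and is not added to the baseline feature set. The usage line at the
# end is hypothetical and assumes raw (unscaled) OHLC columns.
def garman_klass_vol(high, low, close, open_, periods_per_year=252):
    # sigma = sqrt( Z/n * sum[ 1/2*ln(H/L)^2 - (2 ln2 - 1)*ln(C/O)^2 ] ), here with n=1
    term = 0.5 * np.log(high / low) ** 2 \
           - (2.0 * np.log(2.0) - 1.0) * np.log(close / open_) ** 2
    return np.sqrt(np.abs(periods_per_year * term))
# e.g. df['GK'] = garman_klass_vol(df.High, df.Low, df.Close, df.Open)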
"""
Explanation: data
The data is currently tainted with a few forward-looking values, all tagged Fwd_*; they will need to be excised from the training set and perhaps used to create the 'labels' for classification purposes. The data breaks down as:
prices & moments: open, high, low, close; return, 20-day SD
volume, deltaV, 20-day ADV
Including the open, high, & low prices seems a bit heavy to me - a lot of parameters for limited information. Perhaps we can represent them better. One type of information we might hope to glean from them is the localized volatility. This could be usefully transformed using Garman-Klass or something similar:
$$ \sigma = \sqrt{ \frac{Z}{n} \sum
\left[ \textstyle\frac{1}{2}\displaystyle
\left( \log \frac{H_i}{L_i} \right)^2 - (2\log 2-1)
\left( \log \frac{C_i}{O_i} \right)^2 \right] }. $$
Z = Number of closing prices in a year, n = number of historical prices used for the volatility estimate.
For now, let's use the data as-is to establish a baseline and then see what else we can do.
model
Let's start simple with a linear model as baseline and then see if an off-the-shelf DNN says anything (more) interesting.
classes
What classifications will we create?
Let's see if the data advises otherwise, but it seems that we could break the universe of returns into three segments:
down
flat
up
So, let's figure out what values should partition the classes and then convert our fwd-looking return to the labels and get rid of the fwd-looking values entirely.
End of explanation
"""
# we'll set our testing/validation divide
TVDIVDATE = '2013-01-01'
# let's define which cols go where
RAW_COLS = ['FSYM', 'Open','High','Low','DOY','DOW','Close','Volume'] #
CALCD_COLS = ['ADV', 'DeltaV', 'Return', 'SD', 'VARATIO' ]# 'GK', 'VOLARATIO',
RAWNCALCD_COLS = RAW_COLS + CALCD_COLS
SMRAW_COLS = ['DOW','Close','Volume'] #
SRAWNCALCD_COLS = SMRAW_COLS + CALCD_COLS
Dataset = collections.namedtuple('Dataset', ['data', 'target'])
Ktrain = K[K.index<=TVDIVDATE].reset_index()
Kvlad = K[K.index>TVDIVDATE].reset_index()
# raw training/validations data sets
raw_train = Dataset(data=Ktrain[RAW_COLS],target=Ktrain.Label )
raw_vlad = Dataset(data=Kvlad[RAW_COLS],target=Kvlad.Label )
# calcd training/validations data sets
calcd_train = Dataset(data=Ktrain[CALCD_COLS],target=Ktrain.Label )
calcd_vlad = Dataset(data=Kvlad[CALCD_COLS],target=Kvlad.Label )
# raw+calcd training/validations data sets
rc_train = Dataset(data=Ktrain[RAWNCALCD_COLS],target=Ktrain.Label )
rc_vlad = Dataset(data=Kvlad[RAWNCALCD_COLS],target=Kvlad.Label )
# small raw training/validations data sets
smraw_train = Dataset(data=Ktrain[SMRAW_COLS],target=Ktrain.Label )
smraw_vlad = Dataset(data=Kvlad[SMRAW_COLS],target=Kvlad.Label )
# small raw+calcd training/validations data sets
src_train = Dataset(data=Ktrain[SRAWNCALCD_COLS],target=Ktrain.Label )
src_vlad = Dataset(data=Kvlad[SRAWNCALCD_COLS],target=Kvlad.Label )
print(raw_train.data.tail())
print(calcd_train.data.tail())
print(rc_train.data.tail())
print(src_train.data.tail())
# let's store these datasets
forsims = { 'src_train': src_train,
'src_vlad': src_vlad,
'Kvlad': Kvlad }
fname = 'forsims.pkl'
pickle.dump(forsims, open( fname, "wb"))
log.info('Wrote %s', fname)
forsims = None
"""
Explanation: Now the data has been scaled and centered, and we have both raw data (i.e., prices, volume) and calculated data (i.e., returns, SD, GK).
Let's partition into training and validation sets (roughly 80/20, split at a fixed date) and try a few different ways of packaging the data...
For training, we'll use three different data sets:
- raw only
- calculated only
- raw & calculated
We'll try to use the tfcontrib code to ease our ascent of the tensorflow learning curve.
End of explanation
"""
def _fitntestLinearClassifier( train, vlad, layers=None, model_dir='/tmp/model', steps=10 ):
# use off-the-shelf Linear classifier, returning accuracy and responses
fsize = len(train.data.columns)
nclasses = len(train.target.unique())
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=fsize)]
# Build model
classifier = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns,
n_classes=nclasses,
model_dir=model_dir)
# Fit model.
classifier.fit(x=train.data, y=train.target, steps=steps)
# Evaluate accuracy.
result = classifier.evaluate(x=vlad.data, y=vlad.target)
print('Accuracy: {0:f}'.format(result["accuracy"]))
return result,classifier
def _fitntestDNN( train, vlad, layers=None, model_dir='/tmp/model', steps=10 ):
# build off-the-shelf network, train and validate
fsize = len(train.data.columns)
nclasses = len(train.target.unique())
if layers is None:
layers = [ fsize, fsize, fsize]
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=fsize)]
# Build 3 layer DNN with fsize, 2*fsize, fsize layers
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=layers,
n_classes=nclasses,
model_dir=model_dir)
# Fit model.
classifier.fit(x=train.data, y=train.target, steps=steps)
# Evaluate accuracy.
result = classifier.evaluate(x=vlad.data, y=vlad.target)
print('Accuracy: {0:f}'.format(result["accuracy"]))
return result,classifier
def _fitntestRandomForest( train, vlad, max_nodes=1024, steps=100, model_dir='/tmp/rf') :
# build fit & test random forest for input
fsize = len(train.data.columns)
nclasses = len(train.target.unique())
hparams = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_trees=nclasses, max_nodes=max_nodes, num_classes=nclasses, num_features=fsize)
classifier = tf.contrib.learn.TensorForestEstimator(hparams)
tdata = train.data.as_matrix().astype(np.float32)
ttgt = train.target.as_matrix().astype(np.float32)
vdata = vlad.data.as_matrix().astype(np.float32)
vtgt = vlad.target.as_matrix().astype(np.float32)
monitors = [tf.contrib.learn.TensorForestLossMonitor(10, 10)]
classifier.fit(x=tdata, y=ttgt, steps=steps, monitors=monitors)
result = classifier.evaluate(x=vdata, y=vtgt)#, steps=np.round(steps/10)
print('Accuracy: {0:f}'.format(result["accuracy"]))
return result,classifier
"""
Explanation: The data is now partitioned into training/validation sets across five feature sets.
Let's define some functions to use the models from TensorFlow.
End of explanation
"""
steps = 10
# use the linear classifier
raw_lc = _fitntestLinearClassifier( train=raw_train, vlad=raw_vlad, model_dir='/tmp/raw_lc', steps=steps)
calcd_lc = _fitntestLinearClassifier( train=calcd_train, vlad=calcd_vlad, model_dir='/tmp/calcd_lc',steps=steps)
rc_lc = _fitntestLinearClassifier( train=rc_train, vlad=rc_vlad, model_dir='/tmp/rc_lc', steps=steps)
smraw_lc = _fitntestLinearClassifier( train=smraw_train, vlad=smraw_vlad, model_dir='/tmp/smraw_lc', steps=steps)
src_lc = _fitntestLinearClassifier( train=src_train, vlad=src_vlad, model_dir='/tmp/src_lc', steps=steps)
"""
Explanation: Now let's train & test our model/featureset combinations
first the linear classifier
End of explanation
"""
# use the dnn
raw_dnn = _fitntestDNN( train=raw_train, vlad=raw_vlad,model_dir='/tmp/raw_dnn', steps=steps)
calcd_dnn = _fitntestDNN( train=calcd_train, vlad=calcd_vlad,model_dir='/tmp/calcd_dnn', steps=steps)
rc_dnn = _fitntestDNN( train=rc_train, vlad=rc_vlad,model_dir='/tmp/rc_dnn', steps=steps)
smraw_dnn = _fitntestDNN( train=smraw_train, vlad=smraw_vlad,model_dir='/tmp/smraw_dnn', steps=steps)
src_dnn = _fitntestDNN( train=src_train, vlad=src_vlad,model_dir='/tmp/src_dnn', steps=steps)
"""
Explanation: deep neural network
End of explanation
"""
# random forests
raw_rf = _fitntestRandomForest(train=raw_train, vlad=raw_vlad, model_dir='/tmp/raw_rf', steps=steps)
calcd_rf = _fitntestRandomForest(train=calcd_train, vlad=calcd_vlad, model_dir='/tmp/calcd_rf', steps=steps)
rc_rf = _fitntestRandomForest(train=rc_train, vlad=rc_vlad, model_dir='/tmp/rc_rf', steps=steps)
smraw_rf = _fitntestRandomForest(train=smraw_train, vlad=smraw_vlad, model_dir='/tmp/smraw_rf', steps=steps)
src_rf = _fitntestRandomForest(train=src_train, vlad=src_vlad, model_dir='/tmp/src_rf', steps=steps)
"""
Explanation: random forests
End of explanation
"""
# let's aggregate our results so far
results = pd.DataFrame( [ raw_lc[0],raw_dnn[0], calcd_lc[0],
calcd_dnn[0], rc_lc[0], rc_dnn[0], smraw_lc[0],smraw_dnn[0],src_lc[0],src_dnn[0],
smraw_rf[0],calcd_rf[0],src_rf[0],raw_rf[0],rc_rf[0]])
results['model'] = ['Linear', 'DNN','Linear', 'DNN','Linear', 'DNN','Linear',
'DNN','Linear', 'DNN','RandomForest','RandomForest','RandomForest','RandomForest','RandomForest']
results['features'] =['raw', 'raw','calcd','calcd','raw+calcd','raw+calcd','smraw',
'smraw','smraw+calcd','smraw+calcd','smraw','calcd','smraw+calcd','raw','raw+calcd']
results.sort_values('accuracy',inplace=True)
results
results.groupby('model').agg('median')
results.groupby('features').agg('median').sort_values('accuracy')
resdf = results[['model','features','accuracy']].sort_values(['model','features'])
resdf.set_index(resdf.features, inplace=True)
resdf.drop('features',axis=1,inplace=True)
print(resdf)
fig, ax = plt.subplots(figsize=(8,6))
for label, df in resdf.groupby('model'):
df.accuracy.plot(ax=ax, label=label)
plt.axhline(y=.333333,color='black')
plt.legend(loc='center right')
plt.title('accuracy by model')
"""
Explanation: let's aggregate & examine our results
End of explanation
"""
vdata = src_vlad.data.as_matrix().astype(np.float32)
vtgt = src_vlad.target.as_matrix().astype(np.float32)
p=src_rf[1].predict( x=vdata)
R = pd.DataFrame( {'predicted':p,'actual':vtgt})
R['dist'] = np.abs(R.actual-R.predicted)
# avg distance is meaningful. a null predictor should get about .88,
# so anything below provides some edge
print(R.dist.mean())
#R
twos=R.dist[R.dist==2]
len(twos.index)/float(len(R.index))
#len(twos)
#len(R.index)
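# Added sanity check for the ~.88 null-predictor benchmark quoted above: with three
# (roughly) balanced classes, a uniformly random predictor has an expected distance
# E|actual - predicted| of 8/9 ~ 0.89.
labels = np.array([0, 1, 2])
probs = np.full(3, 1.0 / 3.0)
expected_dist = sum(probs[i] * probs[j] * abs(labels[i] - labels[j])
                    for i in range(3) for j in range(3))
print(expected_dist)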
"""
Explanation: Let's take a moment to consider our results so far.
models
We've used three simple off the shelf models from tensorflow:
- linear classifier (tf.contrib.learn.LinearClassifier)
- DNN (tf.contrib.learn.DNNClassifier)
- random forests (tf.contrib.learn.TensorForestEstimator)
We've gotten limited results, as seen in the results table and the 'accuracy by model' plot just above. Basically, we expect a null predictor to get a score of about 33.3% (the black line in the plot). We were able to beat that in 10 of our 15 cases.
The models matter. The DNN severely underperformed, with a median accuracy of 29.6% - nearly 4% worse than the null predictor. Worse, it showed limited stability, with four scores below par and then a wild 41% on the featureset that did worst for the other models. It's very possible that the hyperparameters are poorly chosen, the model is undertrained, or I'm misusing the model in some other fashion, but as implemented it doesn't perform.
The linear model does well but isn't terribly stable, at least compared with the random forests, which were both consistently positive and consistent across featuresets, though they clearly showed a preference for more data.
featuresets
We've also created five different feature sets:
- raw (comprehensive) - all of our raw data (sym,dayofyear,dayofweek,open,high,low,close,volume)
- smraw (small) - a subset of the raw data (dow, close, volume)
- calculated - values calculated on raw data (ADV, DeltaV, Return, SD, VARATIO)
- raw+calculated - all data
- smraw+calculated - small raw set with calculated set
It's likely that a limited amount of raw data and then well-chosen calculated data is best, but it's hard to make sweeping assessments based on this limited study.
Up until now, we've only looked at one day and one symbol in a featureset. I'd like to look at stacking or enfolding the data so that we include several days of lookback values as inputs to the models. Another variant of this would be to provide a consistently-sized universe of more than one symbol at a time, possibly also with a lookback element.
Next steps for consideration
There remain numerous paths for progression. Some of these:
Feature improvements.
More calculated features - create more calculated features for model training
Extend our featuresets by adding historical data
Extend our featuresets by looking at more than one name at a time
Different models and/or different hyperparameters.
Drill-down a bit further on the results we've gotten so far
Complete the circuit: take our best results so far and see if we can integrate them into one of our existing strategies to see if such limited predictive ability might still yield meaningful results.
I like all these ideas and want to do them all. But as practitioners, we press on to complete the circuit knowing that we can return to these studies with some concrete results in hand.
In the next workbook, we look at our simple strategies in a bit greater detail with pyfolio.
Following that, we'll complete the circuit and include predictive information from the random forest model to our strategies to see if/how they might be improved even with this limited edge.
A brief look at the distance between the average prediction and the actual label, followed by a brief look at random forest regression
End of explanation
"""
# in the spirit of minimizing distance, let's see if we can calculate the
# distance from a regression, as that could be a meaningful improvement
def _fitntestRandomForest_Regr( train, vlad, max_nodes=1024, steps=10, model_dir='/tmp/rfr') :
# build fit & test random forest for input
fsize = len(train.data.columns)
nclasses = len(train.target.unique())
hparams = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_trees=nclasses, max_nodes=max_nodes, num_classes=nclasses, num_features=fsize)
tdata = train.data.as_matrix().astype(np.float32)
ttgt = train.target.as_matrix().astype(np.float32)
vdata = vlad.data.as_matrix().astype(np.float32)
vtgt = vlad.target.as_matrix().astype(np.float32)
regressor = tf.contrib.learn.TensorForestEstimator(hparams)
monitors = [tf.contrib.learn.TensorForestLossMonitor(10, 10)]
regressor.fit(x=tdata, y=ttgt, steps=steps, monitors=monitors)
    result = regressor.evaluate(x=vdata, y=vtgt)
    print('Accuracy: {0:f}'.format(result["accuracy"]))
    return result, regressor
src_rfr = _fitntestRandomForest_Regr(train=src_train, vlad=src_vlad, model_dir='/tmp/src_rfr', steps=100)
pr = src_rfr[1].predict( x=vdata)
RR = pd.DataFrame( {'predicted':pr,'actual':vtgt})
RR['dist'] = np.abs(RR.actual-RR.predicted)
# does regression beat the classifier for distance?
print(RR.dist.mean())
twos = RR.dist[RR.dist == 2]
print(len(twos.index) / float(len(RR.index)))
"""
Explanation: Does a random forest regression model outperform the classifier we've used?
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/bnu/cmip6/models/sandbox-1/seaice.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bnu', 'sandbox-1', 'seaice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Seaice
MIP Era: CMIP6
Institute: BNU
Source ID: SANDBOX-1
Topic: Seaice
Sub-Topics: Dynamics, Thermodynamics, Radiative Processes.
Properties: 80 (63 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:41
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Model
2. Key Properties --> Variables
3. Key Properties --> Seawater Properties
4. Key Properties --> Resolution
5. Key Properties --> Tuning Applied
6. Key Properties --> Key Parameter Values
7. Key Properties --> Assumptions
8. Key Properties --> Conservation
9. Grid --> Discretisation --> Horizontal
10. Grid --> Discretisation --> Vertical
11. Grid --> Seaice Categories
12. Grid --> Snow On Seaice
13. Dynamics
14. Thermodynamics --> Energy
15. Thermodynamics --> Mass
16. Thermodynamics --> Salt
17. Thermodynamics --> Salt --> Mass Transport
18. Thermodynamics --> Salt --> Thermodynamics
19. Thermodynamics --> Ice Thickness Distribution
20. Thermodynamics --> Ice Floe Size Distribution
21. Thermodynamics --> Melt Ponds
22. Thermodynamics --> Snow Processes
23. Radiative Processes
1. Key Properties --> Model
Name of seaice model used.
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of sea ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of sea ice model code (e.g. CICE 4.2, LIM 2.1, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Variables
List of prognostic variable in the sea ice model.
2.1. Prognostic
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the sea ice component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Seawater Properties
Properties of seawater relevant to sea ice
3.1. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Ocean Freezing Point Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant seawater freezing point, specify this value.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Resolution
Resolution of the sea ice grid
4.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid e.g. N512L180, T512L70, ORCA025 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning applied to sea ice model component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Target
Is Required: TRUE Type: STRING Cardinality: 1.1
What was the aim of tuning, e.g. correct sea ice minima, correct seasonal cycle.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Simulations
Is Required: TRUE Type: STRING Cardinality: 1.1
*Which simulations had tuning applied, e.g. all, not historical, only pi-control? *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Metrics Used
Is Required: TRUE Type: STRING Cardinality: 1.1
List any observed metrics used in tuning model/parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.5. Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Which variables were changed during the tuning process?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Key Parameter Values
Values of key parameters
6.1. Typical Parameters
Is Required: FALSE Type: ENUM Cardinality: 0.N
What values were specified for the following parameters if used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Additional Parameters
Is Required: FALSE Type: STRING Cardinality: 0.N
If you have any additional parameterised values that you have used (e.g. minimum open water fraction or bare ice albedo), please provide them here as a comma separated list
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Assumptions
Assumptions made in the sea ice model
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.N
General overview description of any key assumptions made in this model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. On Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
Note any assumptions that specifically affect the CMIP6 diagnostic sea ice variables.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Missing Processes
Is Required: TRUE Type: STRING Cardinality: 1.N
List any key processes missing in this model configuration? Provide full details where this affects the CMIP6 diagnostic sea ice variables?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the sea ice component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Provide a general description of conservation methodology.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Properties
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in sea ice by the numerical schemes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
For each conserved property, specify the output variables which close the related budgets. as a comma separated list. For example: Conserved property, variable1, variable2, variable3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.4. Was Flux Correction Used
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does conservation involve flux correction?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Corrected Conserved Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List any variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Grid --> Discretisation --> Horizontal
Sea ice discretisation in the horizontal
9.1. Grid
Is Required: TRUE Type: ENUM Cardinality: 1.1
Grid on which sea ice is horizontally discretised?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the type of sea ice grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the advection scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Thermodynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model thermodynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.5. Dynamics Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
What is the time step in the sea ice model dynamic component in seconds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional horizontal discretisation details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Sea ice vertical properties
10.1. Layering
Is Required: TRUE Type: ENUM Cardinality: 1.N
What type of sea ice vertical layers are implemented for purposes of thermodynamic calculations?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.2. Number Of Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using multi-layers specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional vertical grid details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Seaice Categories
What method is used to represent sea ice categories?
11.1. Has Mulitple Categories
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Set to true if the sea ice model has multiple sea ice categories.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Number Of Categories
Is Required: TRUE Type: INTEGER Cardinality: 1.1
If using sea ice categories specify how many.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Category Limits
Is Required: TRUE Type: STRING Cardinality: 1.1
If using sea ice categories specify each of the category limits.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Ice Thickness Distribution Scheme
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the sea ice thickness distribution scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Other
Is Required: FALSE Type: STRING Cardinality: 0.1
If the sea ice model does not use sea ice categories, specify any additional details. For example, models that parameterise the ice thickness distribution (ITD), i.e. there is no explicit ITD, but an assumed distribution is used and fluxes are computed accordingly.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Grid --> Snow On Seaice
Snow on sea ice details
12.1. Has Snow On Ice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow on ice represented in this model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12.2. Number Of Snow Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels of snow on ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Snow Fraction
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how the snow fraction on sea ice is determined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.4. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any additional details related to snow on ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamics
Sea Ice Dynamics
13.1. Horizontal Transport
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of horizontal advection of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Transport In Thickness Space
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice transport in thickness space (i.e. in thickness categories)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Ice Strength Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Which method of sea ice strength formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Redistribution
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which processes can redistribute sea ice (including thickness)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Rheology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Rheology, what is the ice deformation formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Thermodynamics --> Energy
Processes related to energy in sea ice thermodynamics
14.1. Enthalpy Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the energy formulation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Thermal Conductivity
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of thermal conductivity is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.3. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of heat diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.4. Basal Heat Flux
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method by which basal ocean heat flux is handled?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.5. Fixed Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If you have selected {Thermal properties depend on S-T (with fixed salinity)}, supply fixed salinity value for each sea ice layer.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.6. Heat Content Of Precipitation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which the heat content of precipitation is handled.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.7. Precipitation Effects On Salinity
Is Required: FALSE Type: STRING Cardinality: 0.1
If precipitation (freshwater) that falls on sea ice affects the ocean surface salinity please provide further details.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Thermodynamics --> Mass
Processes related to mass in sea ice thermodynamics
15.1. New Ice Formation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method by which new sea ice is formed in open water.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Ice Vertical Growth And Melt
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs the vertical growth and melt of sea ice.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Ice Lateral Melting
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the method of sea ice lateral melting?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.4. Ice Surface Sublimation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method that governs sea ice surface sublimation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.5. Frazil Ice
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of frazil ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16. Thermodynamics --> Salt
Processes related to salt in sea ice thermodynamics.
16.1. Has Multiple Sea Ice Salinities
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the sea ice model use two different salinities: one for thermodynamic calculations; and one for the salt budget?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 16.2. Sea Ice Salinity Thermal Impacts
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does sea ice salinity impact the thermal properties of sea ice?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Thermodynamics --> Salt --> Mass Transport
Mass transport of salt
17.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the mass transport of salt calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Thermodynamics --> Salt --> Thermodynamics
Salt thermodynamics
18.1. Salinity Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is salinity determined in the thermodynamic calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.2. Constant Salinity Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If using a constant salinity value specify this value in PSU?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.3. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the salinity profile used.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Thermodynamics --> Ice Thickness Distribution
Ice thickness distribution details.
19.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice thickness distribution represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Thermodynamics --> Ice Floe Size Distribution
Ice floe-size distribution details.
20.1. Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is the sea ice floe-size represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Please provide further details on any parameterisation of floe-size.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 21. Thermodynamics --> Melt Ponds
Characteristics of melt ponds.
21.1. Are Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are melt ponds included in the sea ice model?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.2. Formulation
Is Required: TRUE Type: ENUM Cardinality: 1.1
What method of melt pond formulation is used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21.3. Impacts
Is Required: TRUE Type: ENUM Cardinality: 1.N
What do melt ponds have an impact on?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22. Thermodynamics --> Snow Processes
Thermodynamic processes in snow on sea ice
22.1. Has Snow Aging
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has a snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Snow Aging Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow aging scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.3. Has Snow Ice Formation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.N
Set to True if the sea ice model has snow ice formation.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.4. Snow Ice Formation Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow ice formation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.5. Redistribution
Is Required: TRUE Type: STRING Cardinality: 1.1
What is the impact of ridging on snow cover?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.6. Heat Diffusion
Is Required: TRUE Type: ENUM Cardinality: 1.1
What is the heat diffusion through snow methodology in sea ice thermodynamics?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Processes
Sea Ice Radiative Processes
23.1. Surface Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used to handle surface albedo.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Ice Radiation Transmission
Is Required: TRUE Type: ENUM Cardinality: 1.N
Method by which solar radiation through sea ice is handled.
End of explanation
"""
|
fevangelista/wicked
|
examples/numerical/spinorbital-CCSD.ipynb
|
mit
|
import time
import wicked as w
import numpy as np
from examples_helpers import *
"""
Explanation: CCSD theory for a closed-shell reference
In this notebook we will use wicked to generate and implement equations for the CCSD method.
To simplify this notebook some of the utility functions are imported from the file examples_helpers.py.
In this example, we run a CCSD computation on the H<sub>6</sub> molecule, reading all the relevant information from the file sr-h6-sto-3g.npy.
End of explanation
"""
molecule = "sr-h6-sto-3g"
with open(f"{molecule}.npy", "rb") as f:
Eref = np.load(f)
nocc, nvir = np.load(f)
H = np.load(f, allow_pickle=True).item()
invD = compute_inverse_denominators(H, nocc, nvir, 2)
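# Conceptual sketch (an assumption about what compute_inverse_denominators returns,
# not its actual implementation): the doubles block holds 1/(e_i + e_j - e_a - e_b),
# shown here with small hypothetical orbital energies.
eps_o_demo = np.array([-1.2, -0.9])      # hypothetical occupied orbital energies
eps_v_demo = np.array([0.3, 0.7, 1.1])   # hypothetical virtual orbital energies
invD_demo = 1.0 / (eps_o_demo[:, None, None, None] + eps_o_demo[None, :, None, None]
                   - eps_v_demo[None, None, :, None] - eps_v_demo[None, None, None, :])
print(invD_demo.shape)  # (2, 2, 3, 3), indexed [i, j, a, b]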
"""
Explanation: Read calculation information (integrals, number of orbitals)
We start by reading information about the reference state, integrals, and denominators from the file sr-h6-sto-3g.npy. The variable H is a dictionary that holds the blocks of the Hamiltonian normal-ordered with respect to the Hartree–Fock determinant. invD similarly is a dictionary that stores the denominators $(\epsilon_i + \epsilon_j + \ldots - \epsilon_a - \epsilon_b - \ldots)^{-1}$.
End of explanation
"""
# Compute the MP2 correlation energy
Emp2 = 0.0
for i in range(nocc):
for j in range(nocc):
for a in range(nvir):
for b in range(nvir):
Emp2 += 0.25 * H["oovv"][i][j][a][b] ** 2 * invD["oovv"][i][j][a][b]
print(f"MP2 correlation energy: {Emp2:.12f} Eh")
"""
Explanation: Compute the MP2 energy
To verify that the Hamiltonian is read correctly, we compute the MP2 correlation energy
End of explanation
"""
w.reset_space()
w.add_space("o", "fermion", "occupied", ["i", "j", "k", "l", "m", "n"])
w.add_space("v", "fermion", "unoccupied", ["a", "b", "c", "d", "e", "f"])
Top = w.op("T", ["v+ o", "v+ v+ o o"])
Hop = w.utils.gen_op("H", 1, "ov", "ov") + w.utils.gen_op("H", 2, "ov", "ov")
# the similarity-transformed Hamiltonian truncated to the four-nested commutator term
Hbar = w.bch_series(Hop, Top, 4)
"""
Explanation: Define orbital spaces and the Hamiltonian and cluster operators
Here we define the cluster operator (Top) and the Hamiltonian (Hop) that will be used to derive the CCSD equations. We also define the similarity-transformed Hamiltonian $\bar{H}$ truncated at the four-nested commutator:
\begin{equation}
\bar{H} = \hat{H} + [\hat{H},\hat{T}] + \frac{1}{2} [[\hat{H},\hat{T}],\hat{T}]
+ \frac{1}{6} [[[\hat{H},\hat{T}],\hat{T}],\hat{T}]
+ \frac{1}{24} [[[[\hat{H},\hat{T}],\hat{T}],\hat{T}],\hat{T}] + \ldots
\end{equation}
End of explanation
"""
wt = w.WickTheorem()
expr = wt.contract(w.rational(1), Hbar, 0, 4)
mbeq = expr.to_manybody_equation("R")
"""
Explanation: In the following lines, we apply Wick's theorem to simplify the similarity-transformed Hamiltonian $\bar{H}$ computing all contributions ranging from operator rank 0 to 4 (double substitutions).
Then we convert all the terms into many-body equations accumulated into the residual R.
End of explanation
"""
energy_eq = generate_equation(mbeq, 0, 0)
t1_eq = generate_equation(mbeq, 1, 1)
t2_eq = generate_equation(mbeq, 2, 2)
exec(energy_eq)
exec(t1_eq)
exec(t2_eq)
# show what do these functions look like
print(energy_eq)
"""
Explanation: Here we finally generate the CCSD equations. We use the utility function generate_equation to extract the equations corresponding to a given number of creation and annihilation operators and generated Python functions that we then define with the command exec
End of explanation
"""
Ecorr_ref = -0.107582941213 # from psi4numpy (H6)
T = {"ov": np.zeros((nocc, nvir)), "oovv": np.zeros((nocc, nocc, nvir, nvir))}
header = "Iter. Energy [Eh] Corr. energy [Eh] |R| "
print("-" * len(header))
print(header)
print("-" * len(header))
start = time.perf_counter()
maxiter = 50
for i in range(maxiter):
# 1. compute energy and residuals
R = {}
Ecorr_w = evaluate_residual_0_0(H, T)
Etot_w = Eref + Ecorr_w
R["ov"] = evaluate_residual_1_1(H, T)
Roovv = evaluate_residual_2_2(H, T)
R["oovv"] = antisymmetrize_residual_2_2(Roovv, nocc, nvir)
# 2. amplitude update
update_cc_amplitudes(T, R, invD, 2)
# 3. check for convergence
norm_R = np.sqrt(np.linalg.norm(R["ov"]) ** 2 + np.linalg.norm(R["oovv"]) ** 2)
print(f"{i:3d} {Etot_w:+.12f} {Ecorr_w:+.12f} {norm_R:e}")
if norm_R < 1.0e-8:
break
end = time.perf_counter()
t = end - start
print("-" * len(header))
print(f"CCSD total energy {Etot_w:+.12f} [Eh]")
print(f"CCSD correlation energy {Ecorr_w:+.12f} [Eh]")
print(f"Reference CCSD correlation energy {Ecorr_ref:+.12f} [Eh]")
print(f"Error {Ecorr_w - Ecorr_ref:+.12e} [Eh]")
print(f"Timing {t:+.12e} [s]")
assert np.isclose(Ecorr_w, Ecorr_ref)
"""
Explanation: CCSD algorithm
Here we code a simple loop in which we evaluate the energy and residuals of the CCSD equations and update the amplitudes.
End of explanation
"""
|
GoogleCloudPlatform/tf-estimator-tutorials
|
04_Times_Series/02.0 - TF ARRegressor - Experiment + CSV.ipynb
|
apache-2.0
|
TIME_INDEX_FEATURE_NAME = 'time_index'
VALUE_FEATURE_NAMES = ['value']
"""
Explanation: Steps to use the ARRegressor + Experiment API
1. Define the metadata
2. Define a data (csv) input function
3. Define a create Estimator function
4. Run an Experiment with learn_runner to train, evaluate, and export the model
5. Predict using the estimator
6. Serve the saved model
1. Define Metadata
End of explanation
"""
def generate_input_fn(file_names, mode, skip_header_lines=1, batch_size = None, windows_size = None):
columns = {
ts.TrainEvalFeatures.TIMES: TIME_INDEX_FEATURE_NAME,
ts.TrainEvalFeatures.VALUES: VALUE_FEATURE_NAMES
}
reader = tf.contrib.timeseries.CSVReader(filenames=file_names,
column_names=columns,
skip_header_lines=skip_header_lines)
num_threads = multiprocessing.cpu_count() if MULTI_THREADING else 1
if mode == tf.estimator.ModeKeys.TRAIN:
input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader,
batch_size=batch_size,
window_size=windows_size,
num_threads= num_threads
)
elif mode == tf.estimator.ModeKeys.EVAL:
input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
return input_fn
"""
Explanation: 2. Define a Data Input Function
End of explanation
"""
def create_estimator(run_config, hparams):
estimator = ts.ARRegressor(
periodicities= hparams.periodicities,
input_window_size= hparams.input_window_size,
output_window_size= hparams.output_window_size,
num_features=len(VALUE_FEATURE_NAMES),
loss=hparams.loss,
hidden_layer_sizes = hparams.hidden_units,
# anomaly_prior_probability=hparams.anomaly_prob,
# anomaly_distribution=hparams.anomaly_dist,
optimizer = tf.train.AdagradOptimizer(learning_rate=hparams.learning_rate),
config=run_config
)
print("")
print("Estimator Type: {}".format(type(estimator)))
print("")
return estimator
"""
Explanation: 3. Define a Create Estimator Function
End of explanation
"""
CHECKPOINT_STEPS=1000
hparams = tf.contrib.training.HParams(
training_steps = 10000,
periodicities = [200],
input_window_size = 40,
output_window_size=10,
batch_size = 15,
anomaly_prob = 0.5,
anomaly_dist = "gaussian",
loss = tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS, # NORMAL_LIKELIHOOD_LOSS | SQUARED_LOSS
hidden_units = None,
learning_rate = 0.1
)
model_dir = 'trained_models/{}'.format(MODEL_NAME)
run_config = tf.contrib.learn.RunConfig(
save_checkpoints_steps=CHECKPOINT_STEPS,
tf_random_seed=19831060,
model_dir=model_dir
)
print("Model directory: {}".format(run_config.model_dir))
print("Hyper-parameters: {}".format(hparams))
print("")
"""
Explanation: 4. Run Experiment
a. Set hyper-params values
End of explanation
"""
def generate_experiment_fn(**experiment_args):
train_input_fn = generate_input_fn(
TRAIN_DATA_FILES,
skip_header_lines=1,
mode = tf.estimator.ModeKeys.TRAIN,
batch_size=hparams.batch_size,
windows_size = hparams.input_window_size + hparams.output_window_size
)
eval_input_fn = generate_input_fn(
TRAIN_DATA_FILES,
skip_header_lines=1,
mode = tf.estimator.ModeKeys.EVAL,
windows_size = hparams.input_window_size + hparams.output_window_size
)
def _experiment_fn(run_config, hparams):
estimator = create_estimator(run_config, hparams)
return tf.contrib.learn.Experiment(
estimator,
train_steps=hparams.training_steps,
eval_steps=1,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
**experiment_args
)
return _experiment_fn
"""
Explanation: b. Define Experiment Function
End of explanation
"""
if not RESUME_TRAINING:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
learn_runner.run(
experiment_fn=generate_experiment_fn(),
run_config=run_config,
schedule="train", #"train_and_evaluate"
hparams=hparams
)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
"""
Explanation: 5. Run the Experiment
End of explanation
"""
hparams.loss = tf.contrib.timeseries.ARModel.SQUARED_LOSS # NORMAL_LIKELIHOOD_LOSS | SQUARED_LOSS
estimator = create_estimator(run_config, hparams)
eval_input_fn = generate_input_fn(
file_names=TRAIN_DATA_FILES,
mode = tf.estimator.ModeKeys.EVAL,
)
tf.logging.set_verbosity(tf.logging.WARN)
evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
print("")
print(evaluation.keys())
print("")
print("Evaluation Loss ({}) : {}".format(hparams.loss, evaluation['loss']))
def compute_rmse(a, b):
rmse = np.sqrt(np.sum(np.square(a - b)) / len(a))
return rmse
def compute_mae(a, b):
    # mean absolute error has no square root, unlike RMSE above
    mae = np.sum(np.abs(a - b)) / len(a)
    return mae
x_current = evaluation['times'][0]
y_current_actual = evaluation['observed'][0].reshape(-1)
y_current_estimated = evaluation['mean'][0].reshape(-1)
rmse = compute_rmse(y_current_actual, y_current_estimated)
mae = compute_mae(y_current_actual, y_current_estimated)
print("Evaluation RMSE {}".format(rmse))
print("Evaluation MAE {}".format(mae))
plt.figure(figsize=(20, 10))
plt.title("Time Series Data")
plt.plot(x_current, y_current_actual, label='actual')
plt.plot(x_current, y_current_estimated, label='estimated')
plt.xlabel("Time Index")
plt.ylabel("Value")
plt.legend(loc=2)
plt.show()
"""
Explanation: 6. Evaluate the Estimator
End of explanation
"""
FORECAST_STEPS = [10,50,100,150,200,250,300]
tf.logging.set_verbosity(tf.logging.ERROR)
eval_input_fn = generate_input_fn(
file_names =TRAIN_DATA_FILES,
mode = tf.estimator.ModeKeys.EVAL
)
evaluation = estimator.evaluate(input_fn=eval_input_fn, steps=1)
df_test = pd.read_csv(TEST_DATA_FILE, names=['time_index','value'], header=0)
print("Test Dataset Size: {}".format(len(df_test)))
print("")
for steps in FORECAST_STEPS:
forecasts = estimator.predict(input_fn=ts.predict_continuation_input_fn(evaluation, steps=steps))
forecasts = tuple(forecasts)[0]
x_next = forecasts['times']
y_next_forecast = forecasts['mean']
y_next_actual = df_test.value[:steps].values
rmse = compute_rmse(y_next_actual, y_next_forecast)
mae = compute_mae(y_next_actual, y_next_forecast)
print("Forecast Steps {}: RMSE {} - MAE {}".format(steps,rmse,mae))
print("")
print(forecasts.keys())
plt.close('all')
plt.figure(figsize=(20, 10))
plt.title("Time Series Data")
plt.plot(x_next, y_next_actual, label='actual')
plt.plot(x_next, y_next_forecast, label='forecasted')
plt.xlabel("Time Index")
plt.ylabel("Value")
plt.legend(loc=2)
plt.show()
x_all = np.concatenate( (x_current, x_next) , axis=0)
y_actual_all = np.concatenate((y_current_actual, y_next_actual), axis=0)
plt.close('all')
plt.figure(figsize=(20, 10))
plt.title("Time Series Data")
plt.plot(x_all, y_actual_all, label='actual')
plt.plot(x_current, y_current_estimated, label='estimated')
plt.plot(x_next, y_next_forecast, label='forecasted')
plt.xlabel("Time Index")
plt.ylabel("Value")
plt.legend(loc=2)
plt.show()
"""
Explanation: 7. Predict using the Estimator
End of explanation
"""
export_dir = model_dir + "/expo"
estimator.export_savedmodel(
export_dir_base=export_dir,
serving_input_receiver_fn=estimator.build_raw_serving_input_receiver_fn(),
as_text=True
)
import os
saved_model_dir = export_dir +"/"+os.listdir(path=export_dir)[-1]
input_values = df_test.value[:40].values
print(saved_model_dir)
predictor_fn = tf.contrib.predictor.from_saved_model(
export_dir = saved_model_dir
)
times = np.arange(1,250)
output = predictor_fn(
{
"model_state_00":[input_values],
"model_state_01":input_values.reshape(1,40,1),
"times": [times]
}
)
predictions = list(map(lambda ls: ls[0],output["mean"][0]))
plt.close('all')
plt.figure(figsize=(20, 10))
plt.title("Time Series Data")
plt.plot(times, predictions, label='predicted')
plt.xlabel("Time Index")
plt.ylabel("Value")
plt.legend(loc=2)
plt.show()
"""
Explanation: 8. Save & Serve Model
End of explanation
"""
|
5hubh4m/CS231n
|
Assignment1/two_layer_net.ipynb
|
mit
|
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
"""
Explanation: Implementing a Neural Network
In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.
End of explanation
"""
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
np.random.seed(0)
return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
def init_toy_data():
np.random.seed(1)
X = 10 * np.random.randn(num_inputs, input_size)
y = np.array([0, 1, 2, 2, 1])
return X, y
net = init_toy_model()
X, y = init_toy_data()
"""
Explanation: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
End of explanation
"""
scores = net.loss(X)
print 'Your scores:'
print scores
print
print 'correct scores:'
correct_scores = np.asarray([
[-0.81233741, -1.27654624, -0.70335995],
[-0.17129677, -1.18803311, -0.47310444],
[-0.51590475, -1.01354314, -0.8504215 ],
[-0.15419291, -0.48629638, -0.52901952],
[-0.00618733, -0.12435261, -0.15226949]])
print correct_scores
print
# The difference should be very small. We get < 1e-7
print 'Difference between your scores and correct scores:'
print np.sum(np.abs(scores - correct_scores))
"""
Explanation: Forward pass: compute scores
Open the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters.
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
End of explanation
"""
loss, _ = net.loss(X, y, reg=0.1)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print 'Difference between your loss and correct loss:'
print np.sum(np.abs(loss - correct_loss))
"""
Explanation: Forward pass: compute loss
In the same function, implement the second part that computes the data and regularizaion loss.
End of explanation
"""
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.1)
# these should all be less than 1e-8 or so
for param_name in grads:
f = lambda W: net.loss(X, y, reg=0.1)[0]
param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))
"""
Explanation: Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
End of explanation
"""
net = init_toy_model()
stats = net.train(X, y, X, y,
learning_rate=1e-1, reg=1e-5,
num_iters=100, verbose=False)
print 'Final training loss: ', stats['loss_history'][-1]
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
"""
Explanation: Train the network
To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
End of explanation
"""
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
"""
Explanation: Load the data
Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.
End of explanation
"""
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=1e-4, learning_rate_decay=0.95,
reg=0.5, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print 'Validation accuracy: ', val_acc
"""
Explanation: Train a network
To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
End of explanation
"""
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.show()
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
W1 = net.params['W1']
W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()
show_net_weights(net)
"""
Explanation: Debug the training
With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
End of explanation
"""
best_net = None # store the best model into this
learning = [1e-5, 1e-3]
regularization = [0, 1]
decay = [0.9, 1]
results = {}
best_val = -1
for num_hidden in np.arange(50, 300, 50):
for _ in np.arange(0, 50):
i = np.random.uniform(low=learning[0], high=learning[1])
j = np.random.uniform(low=regularization[0], high=regularization[1])
k = np.random.uniform(low=decay[0], high=decay[1])
# Train the network
net = TwoLayerNet(input_size, num_hidden, num_classes)
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=500, batch_size=200,
learning_rate=i, learning_rate_decay=k,
reg=j, verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
results[(num_hidden, i, j, k)] = val_acc
if val_acc > best_val:
best_val = val_acc
# Print the obtained accuracies
for nh, lr, reg, dec in sorted(results):
print 'Hidden: %d, learning rate: %f, regularisation: %f, decay: %f -> %f' % ( \
nh, lr, reg, dec, results[nh, lr, reg, dec])
# Find the best learning rate and regularization strength
best_hidden = 25
best_lr = 0.000958
best_reg = 0.952745
best_decay = 0.935156
best_val = -1
for nh, lr, reg, dec in sorted(results):
if results[(nh, lr, reg, dec)] > best_val:
best_val = results[(nh, lr, reg, dec)]
best_hidden = nh
best_lr = lr
best_reg = reg
best_decay = dec
# Train the best_net with more iterations
best_net = TwoLayerNet(input_size, best_hidden, num_classes)
stats = best_net.train(X_train, y_train, X_val, y_val,
num_iters=2000, batch_size=200,
learning_rate=best_lr, learning_rate_decay=best_decay,
reg=best_reg, verbose=True)
# Predict on the validation set
val_acc = (best_net.predict(X_val) == y_val).mean()
print 'Best validation accuracy now: %f' % val_acc
# visualize the weights of the best network
show_net_weights(best_net)
"""
Explanation: Tune your hyperparameters
What's wrong? Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
Tuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, number of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
Approximate results. You should aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.
Experiment: Your goal in this exercise is to get as good a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free to implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).
End of explanation
"""
test_acc = (best_net.predict(X_test) == y_test).mean()
print 'Test accuracy: ', test_acc
"""
Explanation: Run on the test set
When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
We will give you an extra bonus point for every 1% of accuracy above 52%.
End of explanation
"""
|
ajhenrikson/phys202-2015-work
|
assignments/assignment09/IntegrationEx01.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
"""
Explanation: Integration Exercise 1
Imports
End of explanation
"""
def trapz(f, a, b, N):
"""Integrate the function f(x) over the range [a,b] with N points."""
t=(b-a)/N
p=np.linspace(a,b,N+1)
weights=np.ones_like(p)
weights[0]=0.5
weights[-1]=0.5
return t*np.dot(f(p),weights)
f = lambda x: x**2
g = lambda x: np.sin(x)
I = trapz(f, 0, 1, 1000)
assert np.allclose(I, 0.33333349999999995)
J = trapz(g, 0, np.pi, 1000)
assert np.allclose(J, 1.9999983550656628)
"""
Explanation: Trapezoidal rule
The trapezoidal rule generates a numerical approximation to the 1d integral:
$$ I(a,b) = \int_a^b f(x) dx $$
by dividing the interval $[a,b]$ into $N$ subdivisions of length $h$:
$$ h = (b-a)/N $$
Note that this means the function will be evaluated at $N+1$ points on $[a,b]$. The main idea of the trapezoidal rule is that the function is approximated by a straight line between each of these points.
Write a function trapz(f, a, b, N) that performs trapezoidal rule on the function f over the interval $[a,b]$ with N subdivisions (N+1 points).
End of explanation
"""
res=integrate.quad(f,0,1)
print(res)
res=integrate.quad(g,0,np.pi)
print(res)
assert True # leave this cell to grade the previous one
"""
Explanation: Now use scipy.integrate.quad to integrate the f and g functions and see how the result compares with your trapz function. Print the results and errors.
End of explanation
"""
|
tobiajo/hops-tensorflow
|
yarntf/examples/slim/slim_walkthrough.ipynb
|
apache-2.0
|
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import math
import numpy as np
import tensorflow as tf
import time
from datasets import dataset_utils
# Main slim library
slim = tf.contrib.slim
"""
Explanation: TF-Slim Walkthrough
This notebook will walk you through the basics of using TF-Slim to define, train and evaluate neural networks on various tasks. It assumes a basic knowledge of neural networks.
Table of contents
<a href="#Install">Installation and setup</a><br>
<a href='#MLP'>Creating your first neural network with TF-Slim</a><br>
<a href='#ReadingTFSlimDatasets'>Reading Data with TF-Slim</a><br>
<a href='#CNN'>Training a convolutional neural network (CNN)</a><br>
<a href='#Pretrained'>Using pre-trained models</a><br>
Installation and setup
<a id='Install'></a>
As of 8/28/16, the latest stable release of TF is r0.10, which does not contain the latest version of slim.
To obtain the latest version of TF-Slim, please install the most recent nightly build of TF
as explained here.
To use TF-Slim for image classification (as we do in this notebook), you also have to install the TF-Slim image models library from here. Let's suppose you install this into a directory called TF_MODELS. Then you should change directory to TF_MODELS/slim before running this notebook, so that these files are in your python path.
To check you've got these two steps to work, just execute the cell below. If it complains about unknown modules, restart the notebook after moving to the TF-Slim models directory.
End of explanation
"""
def regression_model(inputs, is_training=True, scope="deep_regression"):
"""Creates the regression model.
Args:
inputs: A node that yields a `Tensor` of size [batch_size, dimensions].
is_training: Whether or not we're currently training the model.
scope: An optional variable_op scope for the model.
Returns:
predictions: 1-D `Tensor` of shape [batch_size] of responses.
end_points: A dict of end points representing the hidden layers.
"""
with tf.variable_scope(scope, 'deep_regression', [inputs]):
end_points = {}
        # Set the default weights_regularizer and activation_fn for each fully_connected layer.
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(0.01)):
# Creates a fully connected layer from the inputs with 32 hidden units.
net = slim.fully_connected(inputs, 32, scope='fc1')
end_points['fc1'] = net
# Adds a dropout layer to prevent over-fitting.
net = slim.dropout(net, 0.8, is_training=is_training)
# Adds another fully connected layer with 16 hidden units.
net = slim.fully_connected(net, 16, scope='fc2')
end_points['fc2'] = net
# Creates a fully-connected layer with a single hidden unit. Note that the
# layer is made linear by setting activation_fn=None.
predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction')
end_points['out'] = predictions
return predictions, end_points
"""
Explanation: Creating your first neural network with TF-Slim
<a id='MLP'></a>
Below we give some code to create a simple multilayer perceptron (MLP) which can be used
for regression problems. The model has 2 hidden layers.
The output is a single node.
When this function is called, it will create various nodes, and silently add them to whichever global TF graph is currently in scope. When a node that corresponds to a layer with adjustable parameters (e.g., a fully connected layer) is created, additional parameter variable nodes are silently created, and added to the graph. (We will discuss how to train the parameters later.)
We use variable scope to put all the nodes under a common name,
so that the graph has some hierarchical structure.
This is useful when we want to visualize the TF graph in tensorboard, or if we want to query related
variables.
The fully connected layers all use the same L2 weight decay and ReLu activations, as specified by arg_scope. (However, the final layer overrides these defaults, and uses an identity activation function.)
We also illustrate how to add a dropout layer after the first fully connected layer (FC1). Note that at test time,
we do not drop out nodes, but instead use the average activations; hence we need to know whether the model is being
constructed for training or testing, since the computational graph will be different in the two cases
(although the variables, storing the model parameters, will be shared, since they have the same name/scope).
End of explanation
"""
with tf.Graph().as_default():
# Dummy placeholders for arbitrary number of 1d inputs and outputs
inputs = tf.placeholder(tf.float32, shape=(None, 1))
outputs = tf.placeholder(tf.float32, shape=(None, 1))
# Build model
predictions, end_points = regression_model(inputs)
# Print name and shape of each tensor.
print "Layers"
for k, v in end_points.iteritems():
print 'name = {}, shape = {}'.format(v.name, v.get_shape())
# Print name and shape of parameter nodes (values not yet initialized)
print "\n"
print "Parameters"
for v in slim.get_model_variables():
print 'name = {}, shape = {}'.format(v.name, v.get_shape())
"""
Explanation: Let's create the model and examine its structure.
We create a TF graph and call regression_model(), which adds nodes (tensors) to the graph. We then examine their shape, and print the names of all the model variables which have been implicitly created inside of each layer. We see that the names of the variables follow the scopes that we specified.
End of explanation
"""
def produce_batch(batch_size, noise=0.3):
xs = np.random.random(size=[batch_size, 1]) * 10
ys = np.sin(xs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise)
return [xs.astype(np.float32), ys.astype(np.float32)]
x_train, y_train = produce_batch(200)
x_test, y_test = produce_batch(200)
plt.scatter(x_train, y_train)
"""
Explanation: Let's create some 1d regression data.
We will train and test the model on some noisy observations of a nonlinear function.
End of explanation
"""
def convert_data_to_tensors(x, y):
inputs = tf.constant(x)
inputs.set_shape([None, 1])
outputs = tf.constant(y)
outputs.set_shape([None, 1])
return inputs, outputs
# The following snippet trains the regression model using a mean_squared_error loss.
ckpt_dir = '/tmp/regression_model/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
inputs, targets = convert_data_to_tensors(x_train, y_train)
# Make the model.
predictions, nodes = regression_model(inputs, is_training=True)
# Add the loss function to the graph.
loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
    # The total loss is the user's loss plus any regularization losses.
total_loss = slim.losses.get_total_loss()
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.005)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training inside a session.
final_loss = slim.learning.train(
train_op,
logdir=ckpt_dir,
number_of_steps=5000,
save_summaries_secs=5,
log_every_n_steps=500)
print("Finished training. Last batch loss:", final_loss)
print("Checkpoint saved in %s" % ckpt_dir)
"""
Explanation: Let's fit the model to the data
The user has to specify the loss function and the optimizer, and slim does the rest.
In particular, the slim.learning.train function does the following:
For each iteration, evaluate the train_op, which updates the parameters using the optimizer applied to the current minibatch. Also, update the global_step.
Occasionally store the model checkpoint in the specified directory. This is useful in case your machine crashes - then you can simply restart from the specified checkpoint.
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_train, y_train)
predictions, end_points = regression_model(inputs, is_training=True)
# Add multiple loss nodes.
mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions)
absolute_difference_loss = slim.losses.absolute_difference(predictions, targets)
# The following two ways to compute the total loss are equivalent
regularization_loss = tf.add_n(slim.losses.get_regularization_losses())
total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss
# Regularization Loss is included in the total loss by default.
# This is good for training, but not for testing.
total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Will initialize the parameters with random weights.
total_loss1, total_loss2 = sess.run([total_loss1, total_loss2])
print('Total Loss1: %f' % total_loss1)
print('Total Loss2: %f' % total_loss2)
print('Regularization Losses:')
for loss in slim.losses.get_regularization_losses():
print(loss)
print('Loss Functions:')
for loss in slim.losses.get_losses():
print(loss)
"""
Explanation: Training with multiple loss functions.
Sometimes we have multiple objectives we want to simultaneously optimize.
In slim, it is easy to add more losses, as we show below. (We do not optimize the total loss in this example,
but we show how to compute it.)
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
# Create the model structure. (Parameters will be loaded below.)
predictions, end_points = regression_model(inputs, is_training=False)
# Make a session which restores the old parameters from a checkpoint.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
inputs, predictions, targets = sess.run([inputs, predictions, targets])
plt.scatter(inputs, targets, c='r');
plt.scatter(inputs, predictions, c='b');
plt.title('red=true, blue=predicted')
"""
Explanation: Let's load the saved model and use it for prediction.
End of explanation
"""
with tf.Graph().as_default():
inputs, targets = convert_data_to_tensors(x_test, y_test)
predictions, end_points = regression_model(inputs, is_training=False)
# Specify metrics to evaluate:
names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({
'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets),
'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets)
})
# Make a session which restores the old graph parameters, and then run eval.
sv = tf.train.Supervisor(logdir=ckpt_dir)
with sv.managed_session() as sess:
metric_values = slim.evaluation.evaluation(
sess,
num_evals=1, # Single pass over data
eval_op=names_to_update_nodes.values(),
final_op=names_to_value_nodes.values())
names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values))
for key, value in names_to_values.iteritems():
print('%s: %f' % (key, value))
"""
Explanation: Let's compute various evaluation metrics on the test set.
In TF-Slim terminology, losses are optimized, but metrics (which may not be differentiable, e.g., precision and recall) are just measured. As an illustration, the code below computes mean squared error and mean absolute error metrics on the test set.
Each metric declaration creates several local variables (which must be initialized via tf.initialize_local_variables()) and returns both a value_op and an update_op. When evaluated, the value_op returns the current value of the metric. The update_op loads a new batch of data, runs the model, obtains the predictions and accumulates the metric statistics appropriately before returning the current value of the metric. We store these value nodes and update nodes in 2 dictionaries.
After creating the metric nodes, we can pass them to slim.evaluation.evaluation, which repeatedly evaluates these nodes the specified number of times. (This allows us to compute the evaluation in a streaming fashion across minibatches, which is useful for large datasets.) Finally, we print the final value of each metric.
End of explanation
"""
import tensorflow as tf
from datasets import dataset_utils
url = "http://download.tensorflow.org/data/flowers.tar.gz"
flowers_data_dir = '/tmp/flowers'
if not tf.gfile.Exists(flowers_data_dir):
tf.gfile.MakeDirs(flowers_data_dir)
dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir)
"""
Explanation: Reading Data with TF-Slim
<a id='ReadingTFSlimDatasets'></a>
Reading data with TF-Slim has two main components: A
Dataset and a
DatasetDataProvider. The former is a descriptor of a dataset, while the latter performs the actions necessary for actually reading the data. Lets look at each one in detail:
Dataset
A TF-Slim
Dataset
contains descriptive information about a dataset necessary for reading it, such as the list of data files and how to decode them. It also contains metadata including class labels, the size of the train/test splits and descriptions of the tensors that the dataset provides. For example, some datasets contain images with labels. Others augment this data with bounding box annotations, etc. The Dataset object allows us to write generic code using the same API, regardless of the data content and encoding type.
TF-Slim's Dataset works especially well when the data is stored as a (possibly sharded)
TFRecords file, where each record contains a tf.train.Example protocol buffer.
TF-Slim uses a consistent convention for naming the keys and values inside each Example record.
DatasetDataProvider
A
DatasetDataProvider is a class which actually reads the data from a dataset. It is highly configurable to read the data in various ways that may make a big impact on the efficiency of your training process. For example, it can be single or multi-threaded. If your data is sharded across many files, it can read each files serially, or from every file simultaneously.
Demo: The Flowers Dataset
For convenience, we've include scripts to convert several common image datasets into TFRecord format and have provided
the Dataset descriptor files necessary for reading them. We demonstrate how easy it is to use these dataset via the Flowers dataset below.
Download the Flowers Dataset
<a id='DownloadFlowers'></a>
We've made available a tarball of the Flowers dataset which has already been converted to TFRecord format.
End of explanation
"""
from datasets import flowers
import tensorflow as tf
slim = tf.contrib.slim
with tf.Graph().as_default():
dataset = flowers.get_split('train', flowers_data_dir)
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32, common_queue_min=1)
image, label = data_provider.get(['image', 'label'])
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
for i in xrange(4):
np_image, np_label = sess.run([image, label])
height, width, _ = np_image.shape
class_name = name = dataset.labels_to_names[np_label]
plt.figure()
plt.imshow(np_image)
plt.title('%s, %d x %d' % (name, height, width))
plt.axis('off')
plt.show()
"""
Explanation: Display some of the data.
End of explanation
"""
def my_cnn(images, num_classes, is_training): # is_training is not used...
with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2):
net = slim.conv2d(images, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.conv2d(net, 64, [5, 5])
net = slim.max_pool2d(net)
net = slim.flatten(net)
net = slim.fully_connected(net, 192)
net = slim.fully_connected(net, num_classes, activation_fn=None)
return net
"""
Explanation: Convolutional neural nets (CNNs).
<a id='CNN'></a>
In this section, we show how to train an image classifier using a simple CNN.
Define the model.
Below we define a simple CNN. Note that the output layer is a linear function - we will apply the softmax transformation externally to the model, either in the loss function (for training), or in the prediction function (during testing).
End of explanation
"""
import tensorflow as tf
with tf.Graph().as_default():
# The model can handle any input size because the first layer is convolutional.
# The size of the model is determined when image_node is first passed into the my_cnn function.
# Once the variables are initialized, the size of all the weight matrices is fixed.
# Because of the fully connected layers, this means that all subsequent images must have the same
# input size as the first image.
batch_size, height, width, channels = 3, 28, 28, 3
images = tf.random_uniform([batch_size, height, width, channels], maxval=1)
# Create the model.
num_classes = 10
logits = my_cnn(images, num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
# Initialize all the variables (including parameters) randomly.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the init_op, evaluate the model outputs and print the results:
sess.run(init_op)
probabilities = sess.run(probabilities)
print('Probabilities Shape:')
print(probabilities.shape) # batch_size x num_classes
print('\nProbabilities:')
print(probabilities)
print('\nSumming across all classes (Should equal 1):')
print(np.sum(probabilities, 1)) # Each row sums to 1
"""
Explanation: Apply the model to some randomly generated images.
End of explanation
"""
from preprocessing import inception_preprocessing
import tensorflow as tf
slim = tf.contrib.slim
def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False):
"""Loads a single batch of data.
Args:
dataset: The dataset to load.
batch_size: The number of images in the batch.
height: The size of each image after preprocessing.
width: The size of each image after preprocessing.
is_training: Whether or not we're currently training or evaluating.
Returns:
images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed.
images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization.
labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes.
"""
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, common_queue_capacity=32,
common_queue_min=8)
image_raw, label = data_provider.get(['image', 'label'])
# Preprocess image for usage by Inception.
image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training)
# Preprocess the image for display purposes.
image_raw = tf.expand_dims(image_raw, 0)
image_raw = tf.image.resize_images(image_raw, [height, width])
image_raw = tf.squeeze(image_raw)
# Batch it up.
images, images_raw, labels = tf.train.batch(
[image, image_raw, label],
batch_size=batch_size,
num_threads=1,
capacity=2 * batch_size)
return images, images_raw, labels
from datasets import flowers
# This might take a few minutes.
train_dir = '/tmp/tfslim_model/'
print('Will save model to %s' % train_dir)
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
# Create the model:
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
number_of_steps=1, # For speed, we just do 1 epoch
save_summaries_secs=1)
print('Finished training. Final batch loss %d' % final_loss)
"""
Explanation: Train the model on the Flowers dataset.
Before starting, make sure you've run the code to <a href="#DownloadFlowers">Download the Flowers</a> dataset. Now, we'll get a sense of what it looks like to use TF-Slim's training functions found in
learning.py. First, we'll create a function, load_batch, that loads batches of data from a dataset. Next, we'll train a model for a single step (just to demonstrate the API), and evaluate the results.
End of explanation
"""
from datasets import flowers
# This might take a few minutes.
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.DEBUG)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset)
logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False)
predictions = tf.argmax(logits, 1)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5),
})
print('Running evaluation Loop...')
checkpoint_path = tf.train.latest_checkpoint(train_dir)
metric_values = slim.evaluation.evaluate_once(
master='',
checkpoint_path=checkpoint_path,
logdir=train_dir,
eval_op=names_to_updates.values(),
final_op=names_to_values.values())
names_to_values = dict(zip(names_to_values.keys(), metric_values))
for name in names_to_values:
print('%s: %f' % (name, names_to_values[name]))
"""
Explanation: Evaluate some metrics.
As we discussed above, we can compute various metrics besides the loss.
Below we show how to compute prediction accuracy of the trained model, as well as top-5 classification accuracy. (The difference between evaluation and evaluation_loop is that the latter writes the results to a log directory, so they can be viewed in tensorboard.)
End of explanation
"""
from datasets import dataset_utils
url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
"""
Explanation: Using pre-trained models
<a id='Pretrained'></a>
Neural nets work best when they have many parameters, making them very flexible function approximators.
However, this means they must be trained on big datasets. Since this process is slow, we provide various pre-trained models - see the list here.
You can either use these models as-is, or you can perform "surgery" on them, to modify them for some other task. For example, it is common to "chop off" the final pre-softmax layer, and replace it with a new set of weights corresponding to some new set of labels. You can then quickly fine tune the new model on a small new dataset. We illustrate this below, using inception-v1 as the base model. While models like Inception V3 are more powerful, Inception V1 is used for speed purposes.
Take into account that VGG and ResNet final layers have only 1000 outputs rather than 1001. The ImageNet dataset provided has an empty background class which can be used to fine-tune the model to other tasks. VGG and ResNet models provided here don't use that class. We provide two examples of using pretrained models: Inception V1 and VGG-16 models to highlight this difference.
Download the Inception V1 checkpoint
End of explanation
"""
import numpy as np
import os
import tensorflow as tf
import urllib2
from datasets import imagenet
from nets import inception
from preprocessing import inception_preprocessing
slim = tf.contrib.slim
image_size = inception.inception_v1.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg'
image_string = urllib2.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
slim.get_model_variables('InceptionV1'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index]))
"""
Explanation: Apply Pre-trained Inception V1 model to Images.
We have to convert each image to the size expected by the model checkpoint.
There is no easy way to determine this size from the checkpoint itself.
So we use a preprocessor to enforce this.
End of explanation
"""
from datasets import dataset_utils
import tensorflow as tf
url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz"
checkpoints_dir = '/tmp/checkpoints'
if not tf.gfile.Exists(checkpoints_dir):
tf.gfile.MakeDirs(checkpoints_dir)
dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir)
"""
Explanation: Download the VGG-16 checkpoint
End of explanation
"""
import numpy as np
import os
import tensorflow as tf
import urllib2
from datasets import imagenet
from nets import vgg
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
image_size = vgg.vgg_16.default_image_size
with tf.Graph().as_default():
url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg'
image_string = urllib2.urlopen(url).read()
image = tf.image.decode_jpeg(image_string, channels=3)
processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
processed_images = tf.expand_dims(processed_image, 0)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(vgg.vgg_arg_scope()):
# 1000 classes instead of 1001.
logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False)
probabilities = tf.nn.softmax(logits)
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
init_fn(sess)
np_image, probabilities = sess.run([image, probabilities])
probabilities = probabilities[0, 0:]
sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.axis('off')
plt.show()
names = imagenet.create_readable_names_for_imagenet_labels()
for i in range(5):
index = sorted_inds[i]
# Shift the index of a class name by one.
print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index+1]))
"""
Explanation: Apply Pre-trained VGG-16 model to Images.
We have to convert each image to the size expected by the model checkpoint.
There is no easy way to determine this size from the checkpoint itself.
So we use a preprocessor to enforce this. Pay attention to the difference caused by 1000 classes instead of 1001.
End of explanation
"""
# Note that this may take several minutes.
import os
from datasets import flowers
from nets import inception
from preprocessing import inception_preprocessing
slim = tf.contrib.slim
image_size = inception.inception_v1.default_image_size
def get_init_fn():
"""Returns a function run by the chief worker to warm-start the training."""
checkpoint_exclude_scopes=["InceptionV1/Logits", "InceptionV1/AuxLogits"]
exclusions = [scope.strip() for scope in checkpoint_exclude_scopes]
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
return slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'inception_v1.ckpt'),
variables_to_restore)
train_dir = '/tmp/inception_finetuned/'
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, _, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
# Specify the loss function:
one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
slim.losses.softmax_cross_entropy(logits, one_hot_labels)
total_loss = slim.losses.get_total_loss()
# Create some summaries to visualize the training process:
tf.summary.scalar('losses/Total Loss', total_loss)
# Specify the optimizer and create the train op:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run the training:
final_loss = slim.learning.train(
train_op,
logdir=train_dir,
init_fn=get_init_fn(),
number_of_steps=2)
print('Finished training. Last batch loss %f' % final_loss)
"""
Explanation: Fine-tune the model on a different set of labels.
We will fine tune the inception model on the Flowers dataset.
End of explanation
"""
import numpy as np
import tensorflow as tf
from datasets import flowers
from nets import inception
slim = tf.contrib.slim
image_size = inception.inception_v1.default_image_size
batch_size = 3
with tf.Graph().as_default():
tf.logging.set_verbosity(tf.logging.INFO)
dataset = flowers.get_split('train', flowers_data_dir)
images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size)
# Create the model, use the default arg scope to configure the batch norm parameters.
with slim.arg_scope(inception.inception_v1_arg_scope()):
logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True)
probabilities = tf.nn.softmax(logits)
checkpoint_path = tf.train.latest_checkpoint(train_dir)
init_fn = slim.assign_from_checkpoint_fn(
checkpoint_path,
slim.get_variables_to_restore())
with tf.Session() as sess:
with slim.queues.QueueRunners(sess):
sess.run(tf.initialize_local_variables())
init_fn(sess)
np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
for i in xrange(batch_size):
image = np_images_raw[i, :, :, :]
true_label = np_labels[i]
predicted_label = np.argmax(np_probabilities[i, :])
predicted_name = dataset.labels_to_names[predicted_label]
true_name = dataset.labels_to_names[true_label]
plt.figure()
plt.imshow(image.astype(np.uint8))
plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
plt.axis('off')
plt.show()
"""
Explanation: Apply fine tuned model to some images.
End of explanation
"""
|
statsmodels/statsmodels.github.io
|
v0.13.2/examples/notebooks/generated/statespace_forecasting.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
"""
Explanation: Forecasting in statsmodels
This notebook describes forecasting using time series models in statsmodels.
Note: this notebook applies only to the state space model classes, which are:
sm.tsa.SARIMAX
sm.tsa.UnobservedComponents
sm.tsa.VARMAX
sm.tsa.DynamicFactor
End of explanation
"""
endog = macrodata['infl']
endog.plot(figsize=(15, 5))
"""
Explanation: Basic example
A simple example is to use an AR(1) model to forecast inflation. Before forecasting, let's take a look at the series:
End of explanation
"""
# Construct the model
mod = sm.tsa.SARIMAX(endog, order=(1, 0, 0), trend='c')
# Estimate the parameters
res = mod.fit()
print(res.summary())
"""
Explanation: Constructing and estimating the model
The next step is to formulate the econometric model that we want to use for forecasting. In this case, we will use an AR(1) model via the SARIMAX class in statsmodels.
After constructing the model, we need to estimate its parameters. This is done using the fit method. The summary method produces several convenient tables showing the results.
End of explanation
"""
# The default is to get a one-step-ahead forecast:
print(res.forecast())
"""
Explanation: Forecasting
Out-of-sample forecasts are produced using the forecast or get_forecast methods from the results object.
The forecast method gives only point forecasts.
End of explanation
"""
# Here we construct a more complete results object.
fcast_res1 = res.get_forecast()
# Most results are collected in the `summary_frame` attribute.
# Here we specify that we want a confidence level of 90%
print(fcast_res1.summary_frame(alpha=0.10))
"""
Explanation: The get_forecast method is more general, and also allows constructing confidence intervals.
End of explanation
"""
print(res.forecast(steps=2))
fcast_res2 = res.get_forecast(steps=2)
# Note: since we did not specify the alpha parameter, the
# confidence level is at the default, 95%
print(fcast_res2.summary_frame())
"""
Explanation: The default confidence level is 95%, but this can be controlled by setting the alpha parameter, where the confidence level is defined as $(1 - \alpha) \times 100\%$. In the example above, we specified a confidence level of 90%, using alpha=0.10.
Specifying the number of forecasts
Both of the functions forecast and get_forecast accept a single argument indicating how many forecasting steps are desired. One option for this argument is always to provide an integer describing the number of steps ahead you want.
End of explanation
"""
print(res.forecast('2010Q2'))
fcast_res3 = res.get_forecast('2010Q2')
print(fcast_res3.summary_frame())
"""
Explanation: However, if your data included a Pandas index with a defined frequency (see the section at the end on Indexes for more information), then you can alternatively specify the date through which you want forecasts to be produced:
End of explanation
"""
fig, ax = plt.subplots(figsize=(15, 5))
# Plot the data (here we are subsetting it to get a better look at the forecasts)
endog.loc['1999':].plot(ax=ax)
# Construct the forecasts
fcast = res.get_forecast('2011Q4').summary_frame()
fcast['mean'].plot(ax=ax, style='k--')
ax.fill_between(fcast.index, fcast['mean_ci_lower'], fcast['mean_ci_upper'], color='k', alpha=0.1);
"""
Explanation: Plotting the data, forecasts, and confidence intervals
Often it is useful to plot the data, the forecasts, and the confidence intervals. There are many ways to do this, but here's one example
End of explanation
"""
# Step 1: fit model parameters w/ training sample
training_obs = int(len(endog) * 0.8)
training_endog = endog[:training_obs]
training_mod = sm.tsa.SARIMAX(
training_endog, order=(1, 0, 0), trend='c')
training_res = training_mod.fit()
# Print the estimated parameters
print(training_res.params)
# Step 2: produce one-step-ahead forecasts
fcast = training_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
"""
Explanation: Note on what to expect from forecasts
The forecast above may not look very impressive, as it is almost a straight line. This is because it is a very simple, univariate forecasting model. Nonetheless, keep in mind that these simple forecasting models can be extremely competitive.
Prediction vs Forecasting
The results objects also contain two methods that allow for both in-sample fitted values and out-of-sample forecasting. They are predict and get_prediction. The predict method only returns point predictions (similar to forecast), while the get_prediction method also returns additional results (similar to get_forecast).
In general, if your interest is out-of-sample forecasting, it is easier to stick to the forecast and get_forecast methods.
Cross validation
Note: some of the functions used in this section were first introduced in statsmodels v0.11.0.
A common use case is to cross-validate forecasting methods by performing h-step-ahead forecasts recursively using the following process:
Fit model parameters on a training sample
Produce h-step-ahead forecasts from the end of that sample
Compare forecasts against test dataset to compute error rate
Expand the sample to include the next observation, and repeat
Economists sometimes call this a pseudo-out-of-sample forecast evaluation exercise, or time-series cross-validation.
Example
We will conduct a very simple exercise of this sort using the inflation dataset above. The full dataset contains 203 observations, and for expositional purposes we'll use the first 80% as our training sample and only consider one-step-ahead forecasts.
A single iteration of the above procedure looks like the following:
End of explanation
"""
# Step 1: append a new observation to the sample and refit the parameters
append_res = training_res.append(endog[training_obs:training_obs + 1], refit=True)
# Print the re-estimated parameters
print(append_res.params)
"""
Explanation: To add on another observation, we can use the append or extend results methods. Either method can produce the same forecasts, but they differ in the other results that are available:
append is the more complete method. It always stores results for all training observations, and it optionally allows refitting the model parameters given the new observations (note that the default is not to refit the parameters).
extend is a faster method that may be useful if the training sample is very large. It only stores results for the new observations, and it does not allow refitting the model parameters (i.e. you have to use the parameters estimated on the previous sample).
If your training sample is relatively small (less than a few thousand observations, for example) or if you want to compute the best possible forecasts, then you should use the append method. However, if that method is infeasible (for example, because you have a very large training sample) or if you are okay with slightly suboptimal forecasts (because the parameter estimates will be slightly stale), then you can consider the extend method.
A second iteration, using the append method and refitting the parameters, would go as follows (note again that the default for append does not refit the parameters, but we have overridden that with the refit=True argument):
End of explanation
"""
# Step 2: produce one-step-ahead forecasts
fcast = append_res.forecast()
# Step 3: compute root mean square forecasting error
true = endog.reindex(fcast.index)
error = true - fcast
# Print out the results
print(pd.concat([true.rename('true'),
fcast.rename('forecast'),
error.rename('error')], axis=1))
"""
Explanation: Notice that these estimated parameters are slightly different than those we originally estimated. With the new results object, append_res, we can compute forecasts starting from one observation further than the previous call:
End of explanation
"""
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.append(updated_endog, refit=False)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
"""
Explanation: Putting it altogether, we can perform the recursive forecast evaluation exercise as follows:
End of explanation
"""
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
"""
Explanation: We now have a set of three forecasts made at each point in time from 1999Q2 through 2009Q3. We can construct the forecast errors by subtracting each forecast from the actual value of endog at that point.
End of explanation
"""
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
"""
Explanation: To evaluate our forecasts, we often want to look at a summary value like the root mean square error. Here we can compute that for each horizon by first flattening the forecast errors so that they are indexed by horizon and then computing the root mean square error for each horizon.
End of explanation
"""
# Setup forecasts
nforecasts = 3
forecasts = {}
# Get the number of initial training observations
nobs = len(endog)
n_init_training = int(nobs * 0.8)
# Create model for initial training sample, fit parameters
init_training_endog = endog.iloc[:n_init_training]
mod = sm.tsa.SARIMAX(training_endog, order=(1, 0, 0), trend='c')
res = mod.fit()
# Save initial forecast
forecasts[training_endog.index[-1]] = res.forecast(steps=nforecasts)
# Step through the rest of the sample
for t in range(n_init_training, nobs):
# Update the results by appending the next observation
updated_endog = endog.iloc[t:t+1]
res = res.extend(updated_endog)
# Save the new set of forecasts
forecasts[updated_endog.index[0]] = res.forecast(steps=nforecasts)
# Combine all forecasts into a dataframe
forecasts = pd.concat(forecasts, axis=1)
print(forecasts.iloc[:5, :5])
# Construct the forecast errors
forecast_errors = forecasts.apply(lambda column: endog - column).reindex(forecasts.index)
print(forecast_errors.iloc[:5, :5])
# Reindex the forecasts by horizon rather than by date
def flatten(column):
return column.dropna().reset_index(drop=True)
flattened = forecast_errors.apply(flatten)
flattened.index = (flattened.index + 1).rename('horizon')
print(flattened.iloc[:3, :5])
# Compute the root mean square error
rmse = (flattened**2).mean(axis=1)**0.5
print(rmse)
"""
Explanation: Using extend
We can check that we get similar forecasts if we instead use the extend method, but that they are not exactly the same as when we use append with the refit=True argument. This is because extend does not re-estimate the parameters given the new observation.
End of explanation
"""
print(endog.index)
"""
Explanation: By not re-estimating the parameters, our forecasts are slightly worse (the root mean square error is higher at each horizon). However, the process is faster, even with only 200 datapoints. Using the %%timeit cell magic on the cells above, we found a runtime of 570ms using extend versus 1.7s using append with refit=True. (Note that using extend is also faster than using append with refit=False).
Indexes
Throughout this notebook, we have been making use of Pandas date indexes with an associated frequency. As you can see, this index marks our data as at a quarterly frequency, between 1959Q1 and 2009Q3.
End of explanation
"""
# Annual frequency, using a PeriodIndex
index = pd.period_range(start='2000', periods=4, freq='A')
endog1 = pd.Series([1, 2, 3, 4], index=index)
print(endog1.index)
# Quarterly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='QS')
endog2 = pd.Series([1, 2, 3, 4], index=index)
print(endog2.index)
# Monthly frequency, using a DatetimeIndex
index = pd.date_range(start='2000', periods=4, freq='M')
endog3 = pd.Series([1, 2, 3, 4], index=index)
print(endog3.index)
"""
Explanation: In most cases, if your data has an associated data/time index with a defined frequency (like quarterly, monthly, etc.), then it is best to make sure your data is a Pandas series with the appropriate index. Here are three examples of this:
End of explanation
"""
index = pd.DatetimeIndex([
'2000-01-01 10:08am', '2000-01-01 11:32am',
'2000-01-01 5:32pm', '2000-01-02 6:15am'])
endog4 = pd.Series([0.2, 0.5, -0.1, 0.1], index=index)
print(endog4.index)
"""
Explanation: In fact, if your data has an associated date/time index, it is best to use that even if it does not have a defined frequency. An example of that kind of index is as follows - notice that it has freq=None:
End of explanation
"""
mod = sm.tsa.SARIMAX(endog4)
res = mod.fit()
"""
Explanation: You can still pass this data to statsmodels' model classes, but you will get the following warning, that no frequency data was found:
End of explanation
"""
res.forecast(1)
"""
Explanation: What this means is that you cannot specify forecasting steps by dates, and the output of the forecast and get_forecast methods will not have associated dates. The reason is that without a given frequency, there is no way to determine what date each forecast should be assigned to. In the example above, there is no pattern to the date/time stamps of the index, so there is no way to determine what the next date/time should be (should it be in the morning of 2000-01-02? the afternoon? or maybe not until 2000-01-03?).
For example, if we forecast one-step-ahead:
End of explanation
"""
# Here we'll catch the exception to prevent printing too much of
# the exception trace output in this notebook
try:
res.forecast('2000-01-03')
except KeyError as e:
print(e)
"""
Explanation: The index associated with the new forecast is 4, because if the given data had an integer index, that would be the next value. A warning is given letting the user know that the index is not a date/time index.
If we try to specify the steps of the forecast using a date, we will get the following exception:
KeyError: 'The `end` argument could not be matched to a location related to the index of the data.'
End of explanation
"""
|
tensorflow/examples
|
courses/udacity_deep_learning/3_regularization.ipynb
|
apache-2.0
|
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
"""
Explanation: Deep Learning
Assignment 3
Previously in 2_fullyconnected.ipynb, you trained a logistic regression and a neural network model.
The goal of this assignment is to explore regularization techniques.
End of explanation
"""
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
"""
Explanation: First reload the data we generated in 1_notmnist.ipynb.
End of explanation
"""
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
"""
Explanation: Reformat into a shape that's more adapted to the models we're going to train:
- data as a flat matrix,
- labels as float 1-hot encodings.
End of explanation
"""
|
jeroarenas/MLBigData
|
2_Classification/Classification III-student.ipynb
|
mit
|
%matplotlib inline
"""
Explanation: Classification III Lab: Working with classifiers
In this lab session we are going to continue working with classification algorithms, mainly, we are going to focus on decision trees and their use in ensembles.
During this lab we will cover:
* Part 1: Trees*
* Part 2: Random forests*
* Part 3: Ensembles of classifiers: bagging and boosting*
End of explanation
"""
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
# Initialize the random generator seed to compare results
np.random.seed(0)
iris = datasets.load_iris()
X = iris.data # All input features are used
Y = iris.target
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.4)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
"""
Explanation: Part 0: Load and preprocess data
In the following sections, we are going to use all input features of the Iris dataset. So, let's start running the following cell to load the complete Iris data.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
from sklearn import tree
clf_tree = # <FILL IN>
acc_tree= # <FILL IN>
print("The test accuracy of the decision tree is %2.2f" %(100*acc_tree))
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_tree, 2), 0.95, 'incorrect result: The value of acc_tree is incorrect')
"""
Explanation: Part 1: Trees
1.1: Training a Decision Tree
Decision Trees learn simple decision rules by iteratively selecting an input feature and setting a threshold over it, so they are a simple tool to understand and to interpret.
Use the DecisionTreeClassifier() function to train a decision tree. Although the tree depth is usually a parameter to select, here we are working with only four input features, so you can use all default parameters and obtain good performance. Complete the following code to return the tree accuracy in the variable acc_tree.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
rang_n_trees=np.arange(1,10)
tuned_parameters = [{'n_estimators': rang_n_trees}]
nfold = 10
clf_RF = #<FILL IN>
n_trees_opt = #<FILL IN>
acc_RF = #<FILL IN>
print "The number of selected trees is " + str(n_trees_opt)
print("The test accuracy of the RF is %2.2f" %(100*acc_RF))
"""
Explanation: Try to use the following example from the scikit-learn documentation to plot the classification regions for different pairs of input features. Modify the necessary code lines to plot our training data over the decision regions.
Be careful: this example retrains a different classifier for each pair of input features, so its solution differs from the one we have just computed above.
Part 2: Random Forest
2.1: Training a Random Forest
A Random Forest (RF) trains several decision tree classifiers, where each one is trained with different sub-samples of the training data, and averages their outputs to improve the final accuracy.
Use the RandomForestClassifier() function to train an RF classifier and select the number of trees by cross-validation. The remaining parameters, such as the number of subsampled data points or features, can be left at their default values. Return the optimal number of trees and the final accuracy of the RF classifier.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to compare results
np.random.seed(0)
print 'This can take a some minutes, be patient'
# Create RF classifier object with CV
clf_RF = # <FILL IN>
acc_RF_vector=[]
n_trees_vector=[]
for run in np.arange(50):
# For each run, train it, compute its accuracy and examine the number of optimal trees
clf_RF.# <FILL IN>
acc = # <FILL IN>
acc_RF_vector.append(acc)
n_trees = # <FILL IN>
n_trees_vector.append(n_trees)
# Compute averaged accuracies and number of used trees
mean_acc_RF = # <FILL IN>
std_acc_RF = # <FILL IN>
mean_n_trees = # <FILL IN>
std_n_trees = # <FILL IN>
# Print the results
print('Averaged accuracy for RF classifier is %2.2f +/- %2.2f '%(100*mean_acc_RF, 100*std_acc_RF))
print('Averaged number of selected trees is %2.2f +/- %2.2f '%(mean_n_trees, std_n_trees))
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
Test.assertEquals(np.round(mean_acc_RF, 1), 0.9, 'incorrect result: The value of mean_acc_RF is incorrect')
Test.assertEquals(np.round(std_acc_RF, 2), 0.03, 'incorrect result: The value of std_acc_RF is incorrect')
Test.assertEquals(np.round(mean_n_trees, 1), 4.2, 'incorrect result: The value of mean_n_trees is incorrect')
Test.assertEquals(np.round(std_n_trees, 1), 2.0, 'incorrect result: The value of std_n_trees is incorrect')
"""
Explanation: Run the above code again; do you obtain the same accuracy?
2.2: Obtaining statistically significant results
Random forests have a random component when the training data are subsampled, so you can obtain a different result for different runs of the algorithm. To provide a statistically significant measurement of the classifier's performance, we therefore need to average the result over a large number of runs.
Complete the following code to train the RF classifier again, but averaging its test accuracies over 50 runs. Provide its average accuracy and the average number of selected trees (include their standard deviations).
End of explanation
"""
from sklearn.ensemble import BaggingClassifier
from sklearn import tree
base_learner = tree.DecisionTreeClassifier(max_depth=1)
bagging = BaggingClassifier(base_learner, n_estimators = 10, max_samples=0.5, max_features = 0.5)
bagging.fit(X_train, Y_train)
acc_test = bagging.score(X_test, Y_test)
print('Accuracy of bagged ensemble is %2.2f '%(100*acc_test))
"""
Explanation: Part 3: Ensembles
The goal of ensemble methods is to combine the predictions of several base estimators or learners to obtain a classifier of improved performance. We are going to work with two ensemble methods:
Bagging methods: their driving principle is to build several estimators with diversity among them and then to average their predictions.
Boosting methods: in this case, base estimators are built sequentially, forcing new learners to pay more attention to samples misclassified by previous learners.
3.1. Bagging methods
Here, to implement bagged classifiers, we are going to use the BaggingClassifier() object, which offers several degrees of freedom in the learner design: sampling with or without replacement, selecting random subsets of features instead of samples, or selecting subsets of both samples and features.
For the sake of simplicity, we are going to use a decision stump (i.e., a decision tree of depth one) as base learner. Note that when decision trees are used as learners, the resulting ensemble is a random forest.
Complete the following code to train an ensemble of bagged decision stumps. Set the max_samples (percentage of training data used to train each learner) and max_features (percentage of input features used to train each learner) parameters to 0.5, and fix the number of learners to 10.
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to test results
np.random.seed(0)
acc_test_evol = []
rang_n_learners = range(1,50,2)
for n_learners in rang_n_learners:
acc_test_run=[]
for run in range(50):
bagging = # <FILL IN>
acc = # <FILL IN>
acc_test_run.append(acc)
acc_test_evol.append(np.mean(acc_test_run))
# Ploting results
plt.figure()
plt.plot(rang_n_learners,acc_test_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Evolution of the bagged ensemble accuracy with the number of learners ')
plt.show()
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_test_evol[-1], 2), 0.94, 'incorrect result: The final value of acc_test_evol is incorrect')
"""
Explanation: Analyze the final ensemble performance as a function of the number of learners. Average the result over 20 or more different runs to obtain statistically significant results (note that the above accuracy changes if you run the code again).
End of explanation
"""
###########################################################
# TODO: Replace <FILL IN> with appropriate code
###########################################################
# Initialize the random generator seed to test results
np.random.seed(0)
from sklearn.ensemble import AdaBoostClassifier
base_learner = tree.DecisionTreeClassifier(max_depth=1)
# Train a discrete Adaboost classifier and obtain its accuracy
AB_D = #<FILL IN>
acc_AB_D = # <FILL IN>
# Train a real Adaboost classifier and obtain its accuracy
AB_R = # <FILL IN>
acc_AB_R = # <FILL IN>
print('Accuracy of discrete adaboost ensemble is %2.2f '%(100*acc_AB_D))
print('Accuracy of real adaboost ensemble is %2.2f '%(100*acc_AB_R))
###########################################################
# TEST CELL
###########################################################
from test_helper import Test
# TEST accuracy values
Test.assertEquals(np.round(acc_AB_D, 2), 0.95, 'incorrect result: The value of acc_AB_D is incorrect')
Test.assertEquals(np.round(acc_AB_R, 2), 0.88, 'incorrect result: The value of acc_AB_R is incorrect')
"""
Explanation: 3.2. Adaboost
To train an AdaBoost classifier, scikit-learn provides the AdaBoostClassifier() method, which includes two versions of the Adaboost algorithm:
* Discrete Adaboost: the learners' outputs are discretized (they provide an estimation of the labels).
* Real Adaboost: the learners' outputs are real values (the soft outputs or the class probabilities).
As in the previous subsection, use a decision stump as base learner. Fix the number of learners to 50 and compare the results of both approaches: Discrete Adaboost (set the algorithm parameter to 'SAMME') and Real Adaboost (algorithm='SAMME.R').
End of explanation
"""
acc_AB_D_evol=[acc for acc in AB_D.staged_score(X_test, Y_test)]
acc_AB_R_evol=[acc for acc in AB_R.staged_score(X_test, Y_test)]
# Ploting results
rang_n_learners=np.arange(50)+1
plt.figure()
plt.subplot(211)
plt.plot(rang_n_learners,acc_AB_D_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Discrete AB accuracy')
plt.subplot(212)
plt.plot(rang_n_learners,acc_AB_R_evol)
plt.xlabel('Number of learners')
plt.ylabel('Accuracy')
plt.title('Real AB accuracy')
plt.show()
"""
Explanation: Unlike the BaggingClassifier() method, AdaBoostClassifier() lets you analyze the evolution of the error without having to train the ensemble for different numbers of learners. For this task, you can use the classifier method .staged_score(), which returns the evolution of the ensemble accuracy. Note that it returns this information as a generator object, so you have to iterate over it to access each element.
The following code lines let you plot the evolution of the ensemble accuracy (over the test data) for both discrete and real Adaboost approaches.
End of explanation
"""
|
utensil/julia-playground
|
py/profile_mv_dual.ipynb
|
mit
|
!pip install pyprof2calltree
!brew install qcachegrind
%%writefile test_41.py
from galgebra.ga import Ga
GA = Ga('e*1|2|3')
a = GA.mv('a', 'vector')
b = GA.mv('b', 'vector')
c = GA.mv('c', 'vector')
def cross(x, y):
return (x ^ y).dual()
xx = cross(a, cross(b, c))
!python -m cProfile -o test_41.cprof test_41.py
!python -m pyprof2calltree -i test_41.cprof -k
"""
Explanation: This is used to profile https://github.com/pygae/galgebra/issues/41 .
The following code uses https://github.com/pygae/galgebra/tree/new_printer .
End of explanation
"""
%%writefile test_41.py
from galgebra.ga import Ga
GA = Ga('e*1|2|3', norm=False)
a = GA.mv('a', 'vector')
b = GA.mv('b', 'vector')
c = GA.mv('c', 'vector')
def cross(x, y):
return (x ^ y).dual()
xx = cross(a, cross(b, c))
!python -m cProfile -o test_41.cprof test_41.py
!python -m pyprof2calltree -i test_41.cprof -k
"""
Explanation: With the qcachegrind View options set as in the original notebook's screenshots, the profiling result looks like the call-graph screenshot shown there (images not reproduced here).
End of explanation
"""
from galgebra.ga import Ga
GA = Ga('e*1|2|3')
a = GA.mv('a', 'vector')
b = GA.mv('b', 'vector')
c = GA.mv('c', 'vector')
def cross(x, y):
return (x ^ y).dual()
xx = cross(a, cross(b, c))
xx
GA.E()
GA.I()
from galgebra.ga import Ga
GA = Ga('e*1|2|3', norm=False)
a = GA.mv('a', 'vector')
b = GA.mv('b', 'vector')
c = GA.mv('c', 'vector')
def cross(x, y):
return (x ^ y).dual()
xx = cross(a, cross(b, c))
xx
GA.E()
GA.I()
"""
Explanation:
End of explanation
"""
|
morganics/bayesianpy
|
examples/notebook/titanic_classification.ipynb
|
apache-2.0
|
%matplotlib inline
import pandas as pd
import numpy as np
import re
import sys
sys.path.append("../../../bayesianpy")
import bayesianpy
import bayesianpy.visual
import logging
import os
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score
pattern = re.compile("([A-Z]{1})([0-9]{1,3})")
def get_cabin_floor_and_number(cabin):
if not isinstance(cabin, str):
return "", np.nan
cabins = cabin.split(" ")
for cabin in cabins:
match = re.match(pattern, cabin)
if match is not None:
floor = match.group(1)
number = match.group(2)
return floor, number
return "", np.nan
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
"""
Explanation: Classification on the Titanic Dataset
The following example gives an idea about how you could run basic classification using a Gaussian mixture model on the Titanic dataset, using a latent node, continuous variables as well as discrete variables. The example uses cross validation to get a more robust accuracy score across the training and testing data sets.
The initial step is our imports, and a bit of code for extracting floor and room number.
End of explanation
"""
db_folder = bayesianpy.utils.get_path_to_parent_dir("")
titanic = pd.read_csv(os.path.join(db_folder, "data/titanic.csv"))
titanic['Floor'], titanic['CabinNumber'] = zip(*titanic.Cabin.map(get_cabin_floor_and_number))
titanic.CabinNumber = titanic.CabinNumber.astype(float)
titanic.Floor.replace("", np.nan, inplace=True)
# drop variables that vary too much, e.g. with almost every row
titanic.drop(['Cabin', 'Ticket', 'Name', 'PassengerId'], inplace=True, axis=1)
"""
Explanation: The first step is a bit of preprocessing to get the data in the required format.
End of explanation
"""
bayesianpy.jni.attach(logger)
"""
Explanation: It's then necessary to attach the thread to the JVM through a pipe created by Jpype (otherwise you get a recursion error message).
End of explanation
"""
auto = bayesianpy.data.AutoType(titanic)
network_factory = bayesianpy.network.NetworkFactory(logger)
discrete = titanic[list(auto.get_discrete_variables())]
continuous = titanic[list(auto.get_continuous_variables())]
print("Discrete variables: {}".format(discrete.columns.tolist()))
print("Continuous variables: {}".format(continuous.columns.tolist()))
"""
Explanation: There are a few basic utility functions for deciding on the type of the data provided - obviously if you're already aware of the type then it's more accurate to manually specify datatypes.
End of explanation
"""
# write data to the temporary sqllite db
with bayesianpy.data.DataSet(titanic, db_folder, logger) as dataset:
# Use a standard template, which generally gives good performance
mixture_naive_bayes_tpl = bayesianpy.template.MixtureNaiveBayes(logger, discrete=discrete, continuous=continuous)
model = bayesianpy.model.NetworkModel(
mixture_naive_bayes_tpl.create(network_factory),
logger)
# result contains a bunch of metrics regarding the training step
results = model.train(dataset)
layout = bayesianpy.visual.NetworkLayout(results.get_network())
graph = layout.build_graph()
pos = layout.fruchterman_reingold_layout(graph)
layout.visualise(graph, pos)
"""
Explanation: The structure will look something like the following (as visualised in networkx). Bayes Server does have a UI, so you could save the model that you generate through the API.
End of explanation
"""
# write data to the temporary sqllite db
with bayesianpy.data.DataSet(titanic, db_folder, logger) as dataset:
# Use a standard template, which generally gives good performance
mixture_naive_bayes_tpl = bayesianpy.template.MixtureNaiveBayes(logger, discrete=discrete, continuous=continuous)
k_folds = 3
kf = KFold(titanic.shape[0], n_folds=k_folds, shuffle=True)
score = 0
# use cross validation to try and predict whether the individual survived or not
for k, (train_indexes, test_indexes) in enumerate(kf):
model = bayesianpy.model.NetworkModel(
mixture_naive_bayes_tpl.create(network_factory),
logger)
# result contains a bunch of metrics regarding the training step
model.train(dataset.subset(train_indexes))
# note that we've not 'dropped' the target data anywhere, this will be retracted when it's queried,
# by specifying query_options.setQueryEvidenceMode(bayesServerInference().QueryEvidenceMode.RETRACT_QUERY_EVIDENCE)
results = model.batch_query(dataset.subset(test_indexes), bayesianpy.model.QueryMostLikelyState("Survived",
output_dtype=titanic['Survived'].dtype))
# Each query just appends a column/ columns on to the original dataframe, so results is the same as titanic.iloc[test_indexes],
# with (in this case) one additional column called 'Survived_maxlikelihood', joined to the original.
score += accuracy_score(y_pred=results['Survived_maxlikelihood'].tolist(),
y_true=results['Survived'].tolist())
print("Average score was {}. Baseline accuracy is about 0.61.".format(score / k_folds))
"""
Explanation: Finally, run the code through 3 folds to get an average score from three different models.
End of explanation
"""
|
3upperm2n/notes-deeplearning
|
projects/tv_script_generation/.ipynb_checkpoints/dlnd_tv_script_generation-checkpoint.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
#print(text)
from collections import Counter
counts = Counter(text)
vocab = sorted(counts, reverse=True, key=counts.get)
#print(vocab)
    # Start ids at 0 so every id is a valid class index for the vocab_size-way softmax
    vocab_to_int = {word: index for index, word in enumerate(vocab)}
int_to_vocab = {index: word for word, index in vocab_to_int.items()}
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
# TODO: Implement Function
token_dd = {}
token_dd['.']='||period||'
token_dd[',']='||comma||'
token_dd['\"']='||quotation_mark||'
token_dd[';']='||semicolon||'
token_dd['!']='||exclamation_mark||'
token_dd['?']='||question_mark||'
token_dd['(']='||left_parentheses||'
token_dd[')']='||right_parentheses||'
token_dd['--']='||dash||'
token_dd['\n']='||return||'
return token_dd
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused with a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
# TODO: Implement Function
inputs_ = tf.placeholder(tf.int32, [None, None], name='input')
targets_ = tf.placeholder(tf.int32, [None, None], name='targets')
lr = tf.placeholder(tf.float32, name='learning_rate')
return inputs_, targets_, lr
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
    # TODO: Implement Function
    # Stack num_layers LSTM cells, each with rnn_size units and dropout on its outputs
    num_layers = 2
    cells = []
    for _ in range(num_layers):
        lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
        cells.append(tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5))
    cell = tf.contrib.rnn.MultiRNNCell(cells)
    initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name='initial_state')
    return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The RNN size should be set using rnn_size
- Initialize Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return outputs, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
#print(vocab_size)
#print(rnn_size)
embed = get_embed(input_data, vocab_size, rnn_size)
outputs, FinalState = build_rnn(cell, embed)
#print(outputs.shape)
Logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return Logits, FinalState
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
#print(int_text)
batch_seq = batch_size * seq_length
n_batches = len(int_text) // batch_seq
trunc_size = n_batches * batch_seq
out = []
for i in range(n_batches):
input_ = []
target_ = []
for j in range(batch_size):
            # Row j of every batch walks through its own contiguous stretch of the
            # text, so batch i takes the i-th window from each of those stretches
            start_pos = j * n_batches * seq_length + i * seq_length
end_pos = start_pos + seq_length
input_.append(int_text[start_pos : end_pos])
target_.append(int_text[start_pos + 1 : end_pos + 1])
out.append([input_, target_])
return np.array(out)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2 3], [ 7 8 9]],
# Batch of targets
[[ 2 3 4], [ 8 9 10]]
],
# Second Batch
[
# Batch of Input
[[ 4 5 6], [10 11 12]],
# Batch of targets
[[ 5 6 7], [11 12 13]]
]
]
```
End of explanation
"""
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 64
# RNN Size
rnn_size = 128
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 10
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to how often, in batches, the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
    # TODO: Implement Function
    input_tensor = loaded_graph.get_tensor_by_name('input:0')
    initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0')
    final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0')
    probs_tensor = loaded_graph.get_tensor_by_name('probs:0')
    return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
    # TODO: Implement Function
    # Sample a word id according to the predicted probability distribution
    word_id = np.random.choice(len(probabilities), p=probabilities)
    return int_to_vocab[word_id]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
mdastro/UV_ETGs
|
GAMAII/Coding/CatAnalysis_Part01.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from __future__ import unicode_literals
from matplotlib.gridspec import GridSpec
# %matplotlib notebook
"""
Explanation: Libraries
End of explanation
"""
my_data = np.loadtxt('../Catalogue/Match07_small_mags_doubleclean_type.csv', delimiter=',', dtype=str)
my_dictionary = {}
for i in range(len(my_data[0, :])): # Converting numpy array into dictionary
my_dictionary[my_data[0, i]] = np.array(my_data[0 + 1:, i], dtype=str)
print my_data.shape
print my_data.size
print my_data[0,:]
redshift = my_dictionary['Z'].astype(float)
cataid = my_dictionary['CATAID'].astype(str)
z_prob = my_dictionary['PROB'].astype(float)
z_quality = my_dictionary['NQ'].astype(int)
fuv_band = my_dictionary['MAG_AB_FUV'].astype(float)
nuv_band = my_dictionary['MAG_AB_NUV'].astype(float)
u_band = my_dictionary['MAG_AB_U'].astype(float)
g_band = my_dictionary['MAG_AB_G'].astype(float)
r_band = my_dictionary['MAG_AB_R'].astype(float)
mag_abs_r = my_dictionary['MAG_ABSOLUTE_R'].astype(float)
stellar_mass = my_dictionary['logmstar'].astype(float) # stellar mass from sed fitting - log scale
stellar_age = my_dictionary['logage'].astype(float) # stellar age - log scale
stellar_met = my_dictionary['metal'].astype(float) # stellar metallicity
dn4000 = my_dictionary['D4000N'].astype(float)
h_alpha_flux = my_dictionary['HA_FLUX_COMP'].astype(float)
h_alpha_ew = my_dictionary['HA_EW_COMP'].astype(float)
h_beta_flux = my_dictionary['HB_FLUX_COMP'].astype(float)
h_beta_ew = my_dictionary['HB_EW_COMP'].astype(float)
nii_flux = my_dictionary['NIIR_FLUX_COMP'].astype(float) # R for red or 6583A -- see http://www.gama-survey.org/dr3/schema/dmu.php?id=8
oiii_flux = my_dictionary['OIIIR_EW_COMP'].astype(float) # R for red or 5007A -- see http://www.gama-survey.org/dr3/schema/dmu.php?id=8
uv_class = my_dictionary['UV_CLASS_YI2011'].astype(str)
obj_type = my_dictionary['TYPE'].astype(int)
print np.unique(uv_class)
print my_data[:,0].shape
"""
Explanation: Loading Dataset
End of explanation
"""
z_min=0.06
z_max=0.40
"""
Explanation: Redshift volume on which I intend to focus my analysis
End of explanation
"""
plt.hist(z_prob, bins=200)
plt.yscale('log')
plt.show()
indexes = np.arange(redshift.size)
index_all = indexes[(r_band>0)*(r_band<19.8)*(nuv_band>0)*(fuv_band>0)*((fuv_band-nuv_band)<50)
*((fuv_band-nuv_band)>(-20))*(redshift>=z_min)*(z_prob>0.8)*(obj_type==3)]
print my_data[index_all].shape
print (np.unique(cataid[index_all])).size
"""
Explanation: Selecting the subsample
End of explanation
"""
index_uvup = np.where(((r_band>0)*(r_band<19.8)*(nuv_band>0)*(fuv_band>0)*(nuv_band-r_band)>5.4)
*(fuv_band-nuv_band<0.9)*(fuv_band-r_band<6.6)*(fuv_band-nuv_band<50)*(fuv_band-nuv_band>-20)
*(redshift>=z_min))
index_rsf = np.where(((r_band>0)*(r_band<19.8)*(nuv_band>0)*(fuv_band>0)*(nuv_band-r_band)<5.4)
*(fuv_band-nuv_band<50)*(fuv_band-nuv_band>-20)*(redshift>=z_min))
index_uvweak = np.where(((r_band>0)*(r_band<19.8)*(nuv_band>0)*(fuv_band>0)*(nuv_band-r_band)>5.4)
*((fuv_band-r_band)>6.6)*(fuv_band-nuv_band<50)*(fuv_band-nuv_band>-20)*(redshift>=z_min))
index_redsequence = np.where(((r_band>0)*(r_band<19.8)*(nuv_band>0)*(fuv_band>0)*(nuv_band-r_band)>5.4)
*(fuv_band-nuv_band<50)*(fuv_band-nuv_band>-20)*(redshift>=z_min))
"""
Explanation: Characterizing the UV emission of the Galaxies
Indices - UV upturn; UV weak; RSF; red sequence galaxies
End of explanation
"""
sns.set_style("whitegrid")
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.
plt.subplots(1,1, figsize=(8,5))
plot01, = plt.plot((nuv_band - r_band)[index_rsf], (fuv_band - nuv_band)[index_rsf], 'o', markersize=6,
color = '#018571', alpha=0.5, label="RSF")
plot02, = plt.plot((nuv_band - r_band)[index_uvweak], (fuv_band - nuv_band)[index_uvweak], 's',
markersize=6, color = '#dfc27d', alpha=0.5, label="UV Weak")
plot03, = plt.plot((nuv_band - r_band)[index_uvup], (fuv_band - nuv_band)[index_uvup], 'D', markersize=6,
color = '#a6611a', alpha=0.5, label="UV upturn")
plt.legend(numpoints=1, loc='best', fontsize=14, frameon=True, framealpha=0.85)
# plt.text(0.0, 4, r"RSF", fontsize=18)
# plt.text(7.5, 4, r"UV Weak", fontsize=18)
# plt.text(7.5, -1.8, r"UV upturn", fontsize=18)
plt.axvline(x=5.4, color='black', linewidth=2.)
plt.axhline(y=0.9, xmin=0.659, xmax=2, color='black', linewidth=2.)
plt.xlabel("NUV-r", fontsize=15)
plt.ylabel("FUV-NUV", fontsize=15)
plt.tick_params('both', labelsize='14')
plt.grid(alpha=0.00)
plt.savefig('../Figs/g2_maglim_yi_diagram.pdf')
plt.savefig('../Figs/g2_maglim_yi_diagram.png')
plt.show()
"""
Explanation: Plot 01: Yi et al. (2011) UV diagnostic plot
End of explanation
"""
xbpt_k01 = np.linspace(-2.2, 0.4, 1000) # Kewley et al. 2001
xbpt_k03 = np.linspace(-2.2, -0.01, 1000) # Kauffmann et al. 2003
xbpt_s06 = np.linspace(-2.2, -0.05, 1000) # Stasinska et al. 2006
xbpt_s07 = np.linspace(-0.182, 2.0, 1000) # Schawinski et al. 2007
# Demarcation curves evaluated on the grids above (vectorized, same formulas as before)
ybpt_k01 = 0.61 / (xbpt_k01 - 0.47) + 1.19 # Kewley et al. 2001
ybpt_k03 = 0.61 / (xbpt_k03 - 0.05) + 1.3 # Kauffmann et al. 2003
# Stasinska et al. 2006
ybpt_s06 = (-30.787 + (1.1358 * xbpt_s06) + 0.27297) * np.tanh(5.7409 * xbpt_s06) - 31.093
ybpt_s07 = 1.05 * xbpt_s07 + 0.45 # Schawinski et al. 2007
"""
Explanation: Characterizing the emission-line profiles of these objects
BPT
End of explanation
"""
print np.unique(np.isnan(h_alpha_flux[index_all])) #is there any NAN value?
print np.unique(np.isinf(h_alpha_flux[index_all])) #is there any infinite value?
print h_alpha_flux[index_all][[h_alpha_flux[index_all]==0]].size #total amount of zeroes
print h_alpha_flux[index_all].size #total size of the sample
print h_alpha_flux[index_all].size - h_alpha_flux[index_all][[h_alpha_flux[index_all]==0]].size #removing the zeroes
idx_bpt_clean = np.where((np.logical_not(np.isnan(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isnan(np.log10(oiii_flux[index_all]/h_beta_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(oiii_flux[index_all]/h_beta_flux[index_all]))))
*(np.logical_not(h_alpha_flux[index_all]<=0))*(np.logical_not(h_beta_flux[index_all]<=0))
*(np.logical_not(nii_flux[index_all]<=0))*(np.logical_not(oiii_flux[index_all]<=0)))
idx_whan_clean = np.where((np.logical_not(np.isnan(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isnan(np.log10(h_alpha_ew[index_all]))))
*(np.logical_not(np.isinf(np.log10(h_alpha_ew[index_all]))))
*(np.logical_not(h_alpha_flux[index_all]<=0))*(np.logical_not(nii_flux[index_all]<=0))
*(np.logical_not(h_alpha_ew[index_all]<=0)))
print redshift[index_all][idx_bpt_clean].size
print redshift[index_all][idx_whan_clean].size
print redshift[index_all].size-redshift[index_all][idx_bpt_clean].size
# for i in range(h_alpha_flux[index_all][idx_bpt_clean].size):
# print h_alpha_flux[index_all][idx_bpt_clean][i]
"""
Explanation: New indices for emission lines - removing invalid values (NaN, inf, and non-positive fluxes or equivalent widths)
End of explanation
"""
h_alpha_flux_bpt = h_alpha_flux[index_all][idx_bpt_clean]
h_beta_flux_bpt = h_beta_flux[index_all][idx_bpt_clean]
nii_flux_bpt = nii_flux[index_all][idx_bpt_clean]
oiii_flux_bpt = oiii_flux[index_all][idx_bpt_clean]
uv_class_bpt = uv_class[index_all][idx_bpt_clean]
print h_alpha_flux_bpt.size
print uv_class_bpt.size
print np.unique(uv_class_bpt)
idx_uvup_bpt = np.where(uv_class_bpt=='UV_UPTURN')
idx_uvwk_bpt = np.where(uv_class_bpt=='UV_WEAK')
idx_rsf_bpt = np.where(uv_class_bpt=='RSF')
"""
Explanation: Creating new arrays of clean h alpha, h beta, nii, oiii to simplify the notation
BPT
End of explanation
"""
h_alpha_flux_whan = h_alpha_flux[index_all][idx_whan_clean]
h_beta_flux_whan = h_beta_flux[index_all][idx_whan_clean]
nii_flux_whan = nii_flux[index_all][idx_whan_clean]
h_alpha_ew_whan = h_alpha_ew[index_all][idx_whan_clean]
uv_class_whan = uv_class[index_all][idx_whan_clean]
print h_alpha_flux_whan.size
print uv_class_whan.size
print np.unique(uv_class_whan)
idx_uvup_whan = np.where(uv_class_whan=='UV_UPTURN')
idx_uvwk_whan = np.where(uv_class_whan=='UV_WEAK')
idx_rsf_whan = np.where(uv_class_whan=='RSF')
"""
Explanation: WHAN
End of explanation
"""
xbpt = np.log10(nii_flux_bpt/h_alpha_flux_bpt)
xbpt_uvup = np.log10(nii_flux_bpt[idx_uvup_bpt]/h_alpha_flux_bpt[idx_uvup_bpt])
xbpt_uvwk = np.log10(nii_flux_bpt[idx_uvwk_bpt]/h_alpha_flux_bpt[idx_uvwk_bpt])
xbpt_rsf = np.log10(nii_flux_bpt[idx_rsf_bpt]/h_alpha_flux_bpt[idx_rsf_bpt])
"""
Explanation: All BPT settings
X-axis
End of explanation
"""
ybpt = np.log10(oiii_flux_bpt/h_beta_flux_bpt)
ybpt_uvup = np.log10(oiii_flux_bpt[idx_uvup_bpt]/h_beta_flux_bpt[idx_uvup_bpt])
ybpt_uvwk = np.log10(oiii_flux_bpt[idx_uvwk_bpt]/h_beta_flux_bpt[idx_uvwk_bpt])
ybpt_rsf = np.log10(oiii_flux_bpt[idx_rsf_bpt]/h_beta_flux_bpt[idx_rsf_bpt])
"""
Explanation: Y-axis
End of explanation
"""
xwhan = np.log10(nii_flux_whan/h_alpha_flux_whan)
xwhan_uvup = np.log10(nii_flux_whan[idx_uvup_whan]/h_alpha_flux_whan[idx_uvup_whan])
xwhan_uvwk = np.log10(nii_flux_whan[idx_uvwk_whan]/h_alpha_flux_whan[idx_uvwk_whan])
xwhan_rsf = np.log10(nii_flux_whan[idx_rsf_whan]/h_alpha_flux_whan[idx_rsf_whan])
"""
Explanation: All WHAN settings
X-axis
End of explanation
"""
ywhan = np.log10(h_alpha_ew_whan)
ywhan_uvup = np.log10(h_alpha_ew_whan[idx_uvup_whan])
ywhan_uvwk = np.log10(h_alpha_ew_whan[idx_uvwk_whan])
ywhan_rsf = np.log10(h_alpha_ew_whan[idx_rsf_whan])
print xbpt_rsf.size, ybpt_rsf.size
print xwhan_rsf.size, ywhan_rsf.size
print xwhan.size
print (xwhan_uvup.size+xwhan_uvwk.size+xwhan_rsf.size)
"""
Explanation: Y-axis
End of explanation
"""
# INITIAL SETTINGS
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.
plt.subplots(figsize=(10.5,5))
# FIRST PLOT -- BPT
plt.subplot(1,2,1)
plot01c = plt.scatter(xbpt_rsf, ybpt_rsf, c='#018571', s=5, alpha=0.3, marker='o')
plot01b = plt.scatter(xbpt_uvwk, ybpt_uvwk, c='#dfc27d', s=10, alpha=1, marker='s')
plot01a = plt.scatter(xbpt_uvup, ybpt_uvup, c='#a6611a', s=20, alpha=1, marker='D')
plot02, = plt.plot(xbpt_k01, ybpt_k01, ':', color='black', label='Kewley+01')
plot03, = plt.plot(xbpt_k03, ybpt_k03, '-', color='black', label='Kauffman+03')
plot04, = plt.plot(xbpt_s06, ybpt_s06, '-.', color='black', label='Stasinska+06')
plot05, = plt.plot(xbpt_s07, ybpt_s07, '--', color='black', label='Schawinski+07')
l1 = plt.legend([plot01a, plot01b, plot01c], [r"UV Upturn", r"UV Weak", r"RSF"], numpoints=1, loc='lower left',
fontsize=12, frameon=True, framealpha=0.85)
l2 = plt.legend([plot02, plot03, plot04, plot05], [r"Kewley+01", r"Kauffman+03", r"Stasińska+06", r"Schawinski+07"],
numpoints=3, loc='lower right', fontsize=12, frameon=True, framealpha=0.85)
l1.get_frame().set_edgecolor('black')
l2.get_frame().set_edgecolor('black')
plt.gca().add_artist(l1)
plt.fill_betweenx(ybpt_k01, xbpt_k01, xbpt_s06, where=(xbpt_s06>-1.242)*(ybpt_s06<0.835), facecolor='gray', alpha=0.2)
plt.fill_between(xbpt_s06, ybpt_k01, ybpt_s06, where=(xbpt_s06>-1.242)*(ybpt_k01<0.835), facecolor='gray', alpha=0.2)
plt.xlabel(r"$\log ([NII]/H{\alpha})$", fontweight='bold', size=14)
plt.ylabel(r"$\log (\left[OIII\right]/H \beta) $", fontweight='bold', fontsize=14)
plt.text(-1.9, 0.2, r"Star Forming", fontsize=16)
# plt.text(-0.5, 1.6, r"AGN", fontsize=13)
plt.text(-0.5, 1.3, r"Seyfert", fontsize=13)
plt.text(0.9, -1, r"LINER", fontsize=13)
plt.annotate(r"Composite", xy=(0., -2), xycoords='data', xytext=(0.5, 0.5), size=13,
arrowprops=dict(arrowstyle='wedge',facecolor='black', connectionstyle="angle3,angleA=90,angleB=0"))
plt.xlim([-2, 1.5])
plt.ylim([-5, 2])
plt.minorticks_on()
plt.tick_params('both', labelsize='13')
plt.grid(alpha=0.0)
# SECOND PLOT -- WHAN DIAGRAM
plt.subplot(1,2,2)
plot01c = plt.scatter(xwhan_rsf, ywhan_rsf, c='#018571', s=5, alpha=0.3, marker='o')
plot01b = plt.scatter(xwhan_uvwk, ywhan_uvwk, c='#dfc27d', s=10, alpha=1.0, marker='s')
plot01a = plt.scatter(xwhan_uvup, ywhan_uvup, c='#a6611a', s=20, alpha=1.0, marker='D')
l3 = plt.legend([plot01a, plot01b, plot01c], [r"UV Upturn", r"UV Weak", r"RSF"],numpoints=1, loc='upper right',
fontsize=12, frameon=True, framealpha=0.7)
l3.get_frame().set_edgecolor('black')
plt.axvline(x=-0.4, ymin=.332, ymax=3.5, color='black', linewidth=1.5)
plt.axhline(y=+0.5, color='black', linewidth=0.5)
plt.axhline(y=0.78, xmin=0.469, xmax=1, color='black', linewidth=1.5)
plt.xlabel(r"$\log ([NII]/H{\alpha})$", fontweight='bold', fontsize=14)
plt.ylabel(r"$\log EW(H{\alpha})$", fontweight='bold', fontsize=14)
plt.text(-1.75, -0.75, r"Retired/Passive", fontsize=13)
plt.text(0.75, 2.0, r"sAGN", fontsize=13)
plt.text(0.75, 0.6, r"wAGN", fontsize=13)
plt.text(-1.75, 3, r"Star Forming", fontsize=13)
plt.xlim([-2, 1.5])
plt.ylim([-1.0, 3.5])
plt.minorticks_on()
plt.tick_params('both', labelsize='13')
plt.grid(alpha=0.0)
# FINAL SETTINGS
plt.tight_layout()
plt.savefig('../Figs/g2_bptwhan.pdf')
plt.savefig('../Figs/g2_bptwhan.png')
plt.show()
"""
Explanation: Plot 02: BPT and WHAN
End of explanation
"""
# OVERALL SETTINGS
plt.subplots(figsize=(8,5))
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.
bpt_xlim = [-2, 1.5]
bpt_ylim = [-5, 2]
whan_xlim = [-2, 1.5]
whan_ylim = [-1.0, 3.5]
# BPT
## RSF
ax1=plt.subplot(2,3,1)
plot01c = plt.scatter(xbpt_rsf, ybpt_rsf, c='#018571', s=10, alpha=0.7, label='RSF')
plot02, = plt.plot(xbpt_k01, ybpt_k01, ':', color='black', label='Kewley+01')
plot03, = plt.plot(xbpt_k03, ybpt_k03, '-', color='black', label='Kauffman+03')
plot04, = plt.plot(xbpt_s06, ybpt_s06, '-.', color='black', label='Stasinska+06')
plot05, = plt.plot(xbpt_s07, ybpt_s07, '--', color='black', label='Schawinski+07')
plt.ylabel(r"$\log (\left[OIII\right]/H \beta) $", fontweight='bold', fontsize=15)
# plt.legend([plot01c], [r"RSF"], numpoints=500, loc='upper right', fontsize=10, frameon=True, framealpha=1.)
plt.title("RSF", fontsize=14)
plt.xlim(bpt_xlim)
plt.ylim(bpt_ylim)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(bpt_xlim[0], bpt_xlim[1], 1.))
plt.yticks(np.arange(bpt_ylim[0], bpt_ylim[1], 2))
plt.grid(alpha=0.0)
## UV Weak
ax2=plt.subplot(2,3,2)
plot01b = plt.scatter(xbpt_uvwk, ybpt_uvwk, c='#dfc27d', s=10, alpha=1, label='UV weak')
plot02, = plt.plot(xbpt_k01, ybpt_k01, ':', color='black', label='Kewley+01')
plot03, = plt.plot(xbpt_k03, ybpt_k03, '-', color='black', label='Kauffman+03')
plot04, = plt.plot(xbpt_s06, ybpt_s06, '-.', color='black', label='Stasinska+06')
plot05, = plt.plot(xbpt_s07, ybpt_s07, '--', color='black', label='Schawinski+07')
# plt.xlabel(r"$\log ([NII]/H{\alpha})$", fontweight='bold', size=19)
plt.title("UV weak", fontsize=14)
plt.xlim(bpt_xlim)
plt.ylim(bpt_ylim)
ax2.yaxis.set_visible(False)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(bpt_xlim[0], bpt_xlim[1], 1.))
plt.grid(alpha=0.0)
## UV Upturn
ax3=plt.subplot(2,3,3)
plot01a = plt.scatter(xbpt_uvup, ybpt_uvup, c='#a6611a', s=10, alpha=1, label='UV upturn')
plot02, = plt.plot(xbpt_k01, ybpt_k01, ':', color='black', label='Kewley+01')
plot03, = plt.plot(xbpt_k03, ybpt_k03, '-', color='black', label='Kauffman+03')
plot04, = plt.plot(xbpt_s06, ybpt_s06, '-.', color='black', label='Stasinska+06')
plot05, = plt.plot(xbpt_s07, ybpt_s07, '--', color='black', label='Schawinski+07')
plt.title("UV upturn", fontsize=14)
plt.xlim(bpt_xlim)
plt.ylim(bpt_ylim)
ax3.yaxis.set_visible(False)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(bpt_xlim[0], bpt_xlim[1], 1.))
plt.grid(alpha=0.0)
# WHAN
## RSF
ax4=plt.subplot(2,3,4)
plot01c = plt.scatter(xwhan_rsf, ywhan_rsf, c='#018571', s=10, alpha=1, label='RSF')
plt.axvline(x=-0.4, ymin=.332, ymax=3.5, color='black', linewidth=1.5)
plt.axhline(y=+0.5, color='black', linewidth=0.5)
plt.axhline(y=0.82, xmin=0.455, xmax=1, color='black', linewidth=1.5)
plt.ylabel(r"$\log EW(H{\alpha})$", fontweight='bold', fontsize=16)
plt.xlim(whan_xlim)
plt.ylim(whan_ylim)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(whan_xlim[0], whan_xlim[1], 1.))
plt.yticks(np.arange(whan_ylim[0], whan_ylim[1], 1.))
plt.grid(alpha=0.0)
ax5=plt.subplot(2,3,5)
plot01b = plt.scatter(xwhan_uvwk, ywhan_uvwk, c='#dfc27d', s=10, alpha=1., label='UV weak')
plt.axvline(x=-0.4, ymin=.332, ymax=3.5, color='black', linewidth=1.5)
plt.axhline(y=+0.5, color='black', linewidth=0.5)
plt.axhline(y=0.82, xmin=0.455, xmax=1, color='black', linewidth=1.5)
plt.xlabel(r"$\log ([NII]/H{\alpha})$", fontweight='bold', size=16)
plt.xlim(whan_xlim)
plt.ylim(whan_ylim)
ax5.yaxis.set_visible(False)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(whan_xlim[0], whan_xlim[1], 1.))
plt.grid(alpha=0.0)
ax6=plt.subplot(2,3,6)
plot01a = plt.scatter(xwhan_uvup, ywhan_uvup, c='#a6611a', s=10, alpha=1, label='UV upturn')
plt.axvline(x=-0.4, ymin=.332, ymax=3.5, color='black', linewidth=1.5)
plt.axhline(y=+0.5, color='black', linewidth=0.5)
plt.axhline(y=0.82, xmin=0.455, xmax=1, color='black', linewidth=1.5)
plt.xlim(whan_xlim)
plt.ylim(whan_ylim)
ax6.yaxis.set_visible(False)
plt.minorticks_on()
plt.tick_params('both', labelsize='14')
plt.xticks(np.arange(whan_xlim[0], whan_xlim[1], 1.))
plt.grid(alpha=0.0)
plt.tight_layout()
plt.savefig('../Figs/g2_bptwhan_split.pdf')
plt.savefig('../Figs/g2_bptwhan_split.png')
plt.show()
"""
Explanation: Plot 03: BPT and WHAN split by UV class
End of explanation
"""
print xbpt.size
print xwhan.size
whan_class = []
for i in range(xwhan.size):
if (xwhan[i]<-0.4)*(ywhan[i]>0.5):
whan_class_i = 'SF'
elif (xwhan[i]>-0.4)*(ywhan[i]>0.82):
whan_class_i = 'sAGN'
elif (xwhan[i]>-0.4)*(ywhan[i]<0.82)*(ywhan[i]>0.5):
whan_class_i = 'wAGN'
elif (ywhan[i]<0.5):
whan_class_i = 'Retired/Passive'
else:
print 'error'
whan_class.append(whan_class_i)
whan_class = np.array(whan_class)
idx_sf = np.where(whan_class=='SF')
idx_sagn = np.where(whan_class=='sAGN')
idx_wagn = np.where(whan_class=='wAGN')
idx_rp = np.where(whan_class=='Retired/Passive')
"""
Explanation: Analysing the cross-match between the BPT, WHAN, and UV classifications
End of explanation
"""
print r"RSF objects in SF region in WHAN diagram is %d" % list(uv_class_whan[idx_sf]).count('RSF')
print r"UV weak objects in SF region in WHAN diagram is %d" % list(uv_class_whan[idx_sf]).count('UV_WEAK')
print r"UV upturn objects in SF region in WHAN diagram is %d" % list(uv_class_whan[idx_sf]).count('UV_UPTURN')
print r"RSF objects in sAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_sagn]).count('RSF')
print r"UV weak objects in sAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_sagn]).count('UV_WEAK')
print r"UV upturn objects in sAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_sagn]).count('UV_UPTURN')
print r"RSF objects in wAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_wagn]).count('RSF')
print r"UV weak objects in wAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_wagn]).count('UV_WEAK')
print r"UV upturn objects in wAGN region in WHAN diagram is %d" % list(uv_class_whan[idx_wagn]).count('UV_UPTURN')
print r"RSF objects in Retired/Passive region in WHAN diagram is %d" % list(uv_class_whan[idx_rp]).count('RSF')
print r"UV weak objects in Retired/Passive region in WHAN diagram is %d" % list(uv_class_whan[idx_rp]).count('UV_WEAK')
print r"UV upturn objects in Retired/Passive region in WHAN diagram is %d" % list(uv_class_whan[idx_rp]).count('UV_UPTURN')
"""
Explanation: I now have both classifications: UV and WHAN. Let's cross-match them and count the number of galaxies in each combination.
End of explanation
"""
print "UV Classification & SF & sAGN & wAGN & Retired/Passive \\"
print "RSF & %d & %d & %d & %d \\ " % (list(uv_class_whan[idx_sf]).count('RSF'),
list(uv_class_whan[idx_sagn]).count('RSF'),
list(uv_class_whan[idx_wagn]).count('RSF'),
list(uv_class_whan[idx_rp]).count('RSF'))
print "UV weak & %d & %d & %d & %d \\ " % (list(uv_class_whan[idx_sf]).count('UV_WEAK'),
list(uv_class_whan[idx_sagn]).count('UV_WEAK'),
list(uv_class_whan[idx_wagn]).count('UV_WEAK'),
list(uv_class_whan[idx_rp]).count('UV_WEAK'))
print "UV upturn & %d & %d & %d & %d \\ " % (list(uv_class_whan[idx_sf]).count('UV_UPTURN'),
list(uv_class_whan[idx_sagn]).count('UV_UPTURN'),
list(uv_class_whan[idx_wagn]).count('UV_UPTURN'),
list(uv_class_whan[idx_rp]).count('UV_UPTURN'))
"""
Explanation: Printing the same information as above, formatted as a LaTeX table
End of explanation
"""
bpt_class = []
idx_co = []
idx_sf = []
idx_sy = []
idx_ln = []
for i in range(xbpt.size):
# checking the proximity of each observation to each curve
idx_k01 = np.abs(xbpt_k01-xbpt[i]).argmin() # index of the K01 curve closest to my observation i
idx_s06 = np.abs(xbpt_s06-xbpt[i]).argmin() # index of the S06 curve closest to my observation i
idx_s07 = np.abs(xbpt_s07-xbpt[i]).argmin() # index of the S07 curve closest to my observation i
if (ybpt[i]>ybpt_k01[idx_k01])*(ybpt[i]>ybpt_s07[idx_s07]):
idx_sy.append(i)
bpt_class.append('Seyfert')
elif (ybpt[i]>ybpt_k01[idx_k01])*(ybpt[i]<ybpt_s07[idx_s07]):
idx_ln.append(i)
bpt_class.append('LINER')
elif (ybpt[i]<ybpt_k01[idx_k01])*(ybpt[i]>ybpt_s06[idx_s06]):
idx_co.append(i)
bpt_class.append('Composite')
else:
idx_sf.append(i)
bpt_class.append('SF')
bpt_class = np.array(bpt_class)
print bpt_class.size
plot_co = plt.scatter(xbpt[idx_co], ybpt[idx_co], c='#a6611a', s=10, alpha=1)
plot_sf = plt.scatter(xbpt[idx_sf], ybpt[idx_sf], c='green', s=10, alpha=0.8)
plot_sy = plt.scatter(xbpt[idx_sy], ybpt[idx_sy], c='blue', s=10, alpha=0.8)
plot_ln = plt.scatter(xbpt[idx_ln], ybpt[idx_ln], c='magenta', s=10, alpha=0.8)
# plot_na = plt.scatter(xbpt[idx_na], ybpt[idx_na], c='red', s=10, alpha=1)
plot02, = plt.plot(xbpt_k01, ybpt_k01, ':', color='black', label='Kewley+01')
plot03, = plt.plot(xbpt_k03, ybpt_k03, '-', color='black', label='Kauffman+03')
plot04, = plt.plot(xbpt_s06, ybpt_s06, '-.', color='black', label='Stasinska+06')
plot05, = plt.plot(xbpt_s07, ybpt_s07, '--', color='black', label='Schawinski+07')
plt.xlim(bpt_xlim)
plt.ylim(bpt_ylim)
plt.minorticks_on()
plt.tick_params('both', labelsize='15')
plt.xticks(np.arange(bpt_xlim[0], bpt_xlim[1], 1.))
plt.grid(alpha=0.0)
plt.show()
"""
Explanation: Estimating the number of objects in each category of the BPT diagram
End of explanation
"""
idxx_sf = np.where(bpt_class=='SF')
idxx_sy = np.where(bpt_class=='Seyfert')
idxx_ln = np.where(bpt_class=='LINER')
idxx_co = np.where(bpt_class=='Composite')
print "UV Classification & SF & Seyfert & LINER & Composite \\"
print "RSF & %d & %d & %d & %d \\ " % (list(uv_class_bpt[idxx_sf]).count('RSF'),
list(uv_class_bpt[idxx_sy]).count('RSF'),
list(uv_class_bpt[idxx_ln]).count('RSF'),
list(uv_class_bpt[idxx_co]).count('RSF'))
print "UV weak & %d & %d & %d & %d \\ " % (list(uv_class_bpt[idxx_sf]).count('UV_WEAK'),
list(uv_class_bpt[idxx_sy]).count('UV_WEAK'),
list(uv_class_bpt[idxx_ln]).count('UV_WEAK'),
list(uv_class_bpt[idxx_co]).count('UV_WEAK'))
print "UV upturn & %d & %d & %d & %d \\ " % (list(uv_class_bpt[idxx_sf]).count('UV_UPTURN'),
list(uv_class_bpt[idxx_sy]).count('UV_UPTURN'),
list(uv_class_bpt[idxx_ln]).count('UV_UPTURN'),
list(uv_class_bpt[idxx_co]).count('UV_UPTURN'))
"""
Explanation: I now have both classifications: UV and BPT. Let's cross-match them and count the number of galaxies in each combination.
End of explanation
"""
bins = np.arange(0, (redshift[index_all]).max(), 0.05)
ratio_uvup_redseq = []
average_redshift = []
z_uv = []
z_rs = []
redshift_uvup = redshift[index_uvup]
for i in range(bins.size):
if i==0:
continue
else:
index_redseq_i = np.where((bins[i-1]<=redshift[index_redsequence])*(redshift[index_redsequence]<=bins[i]))
index_uvup_i = np.where((bins[i-1]<=redshift_uvup)*(redshift_uvup <= bins[i]))
redshift_bin_redseq = redshift[index_redseq_i]
redshift_bin_uvup = redshift_uvup[index_uvup_i]
if (redshift_bin_redseq.size==0):
ratio_uvup_i = 0
print "There are no UV Upturn galaxies in this range of redshift: %.2f and %.2f" % (bins[i-1], bins[i])
else:
ratio_uvup_i = (np.float(redshift_bin_uvup.size) / np.float(redshift_bin_redseq.size)) *100
average_redshift_i = np.average((bins[i], bins[i-1]))
average_redshift.append(average_redshift_i)
z_uv.append(redshift_bin_uvup.size)
z_rs.append(redshift_bin_redseq.size)
ratio_uvup_redseq.append(ratio_uvup_i)
ratio_uvup_redseq = np.array(ratio_uvup_redseq)
z_uv = np.array(z_uv)
z_rs = np.array(z_rs)
average_redshift = np.array(average_redshift)
n_groups = bins.size
index = np.arange(1,n_groups,1)
sns.set_style('white')
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.
plt.rcParams['mathtext.fontset'] = u'stixsans'
plt.subplots(1,1, figsize=(8,5))
plt.bar(index[[ratio_uvup_redseq!=0]], ratio_uvup_redseq[[ratio_uvup_redseq!=0]], width=1., alpha=0.8,
color='#a6611a', edgecolor='#a6611a')
for i in range(bins[[ratio_uvup_redseq!=0]].size):
plt.text(index[i+1]-0.2, ratio_uvup_redseq[i+1]+3.5, r"$\mathrm{\mathbf{\frac{%4d}{%4d}}}$" % (z_uv[i+1], z_rs[i+1]),
fontsize=15)
plt.xticks(index, bins)
plt.ylabel("% of UV Upturn Galaxies", fontsize=15)
plt.xlabel("Redshift", fontsize=15)
plt.tick_params('both', labelsize='14')
plt.xlim(0.5, bins[[ratio_uvup_redseq!=0]].size +2.5)
plt.ylim(0, 75)
plt.savefig('../Figs/g2_barplot_uvred.pdf')
plt.savefig('../Figs/g2_barplot_uvred.png')
plt.show()
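# The per-redshift-bin counts computed by the loop above can also be obtained directly
# with np.histogram (an added sketch; `bins` and the index arrays are defined above).
counts_uvup, _ = np.histogram(redshift[index_uvup], bins=bins)
counts_redseq, _ = np.histogram(redshift[index_redsequence], bins=bins)
print counts_uvup
print counts_redseq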
idx_not_bpt= np.where(np.logical_not((np.logical_not(np.isnan(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isnan(np.log10(oiii_flux[index_all]/h_beta_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(oiii_flux[index_all]/h_beta_flux[index_all]))))
*(np.logical_not(h_alpha_flux[index_all]<=0))*(np.logical_not(h_beta_flux[index_all]<=0))
*(np.logical_not(nii_flux[index_all]<=0))*(np.logical_not(oiii_flux[index_all]<=0))))
print idx_not_bpt
print idx_bpt_clean
print (uv_class[index_all][idx_not_bpt]).size
print (uv_class[index_all][idx_bpt_clean]).size
print (uv_class[index_all][idx_not_bpt]).size + (uv_class[index_all][idx_bpt_clean]).size
idx_not_whan = np.where(np.logical_not((np.logical_not(np.isnan(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isinf(np.log10(nii_flux[index_all]/h_alpha_flux[index_all]))))
*(np.logical_not(np.isnan(np.log10(h_alpha_ew[index_all]))))
*(np.logical_not(np.isinf(np.log10(h_alpha_ew[index_all]))))
*(np.logical_not(h_alpha_flux[index_all]<=0))*(np.logical_not(nii_flux[index_all]<=0))
*(np.logical_not(h_alpha_ew[index_all]<=0))))
print np.array(idx_not_whan).size
print np.unique(uv_class[index_all][idx_not_whan])
print (uv_class[index_all][idx_not_whan]).size + (uv_class[index_all][idx_whan_clean]).size
print list(uv_class[index_all][idx_not_bpt]).count('UV_UPTURN')
print list(uv_class[index_all][idx_not_bpt]).count('UV_WEAK')
print list(uv_class[index_all][idx_not_bpt]).count('RSF')
print list(uv_class[index_all][idx_not_whan]).count('UV_UPTURN')
print list(uv_class[index_all][idx_not_whan]).count('UV_WEAK')
print list(uv_class[index_all][idx_not_whan]).count('RSF')
print uv_class[index_all][idx_not_whan].size
print uv_class[index_all][idx_whan_clean].size
emlines_not_bpt = []
for i in range(np.array(idx_not_bpt).size):
emlines_not_bpt.append('NA')
emlines_not_bpt = np.array(emlines_not_bpt)
emlines_not_whan = []
for i in range(np.array(idx_not_whan).size):
emlines_not_whan.append('NA')
emlines_not_whan = np.array(emlines_not_whan)
idxs_bpt_temp = idx_bpt_clean + idx_not_bpt
idxs_bpt = list(idxs_bpt_temp[0])+list(idxs_bpt_temp[1])
print len(idxs_bpt)
"""
Explanation: Now we evaluate the fraction of UV upturn sources as a function of redshift
End of explanation
"""
bpt_catai_temp1 = pd.DataFrame(cataid[index_all][idx_bpt_clean])
bpt_class_temp1 = pd.DataFrame(bpt_class)
bpt_pos = pd.concat([bpt_catai_temp1, bpt_class_temp1], axis=1)
bpt_pos.columns = ['CATAID', 'BPT_CLASS']
bpt_catai_temp2 = pd.DataFrame(cataid[index_all][idx_not_bpt])
bpt_class_temp2 = pd.DataFrame(emlines_not_bpt)
bpt_neg = pd.concat([bpt_catai_temp2, bpt_class_temp2], axis=1)
bpt_neg.columns = ['CATAID', 'BPT_CLASS']
bpt_all = pd.concat([bpt_pos, bpt_neg])
print np.unique(bpt_all['CATAID']).size
whan_catai_temp1 = pd.DataFrame(cataid[index_all][idx_whan_clean])
whan_class_temp1 = pd.DataFrame(whan_class)
whan_pos = pd.concat([whan_catai_temp1, whan_class_temp1], axis=1)
whan_pos.columns = ['CATAID', 'WHAN_CLASS']
whan_catai_temp2 = pd.DataFrame(cataid[index_all][idx_not_whan])
whan_class_temp2 = pd.DataFrame(emlines_not_whan)
whan_neg = pd.concat([whan_catai_temp2, whan_class_temp2], axis=1)
whan_neg.columns = ['CATAID', 'WHAN_CLASS']
whan_all = pd.concat([whan_pos, whan_neg])
print np.unique(whan_all['CATAID']).size
my_df_temp = pd.DataFrame(my_data[index_all+1])
my_df_temp.columns = my_data[0,:]
my_df_temp2 = my_df_temp.set_index('CATAID').join(bpt_all.set_index('CATAID')).join(whan_all.set_index('CATAID'))
my_df_temp2['BPT_CLASS'].value_counts(dropna=False)
my_df_temp2['WHAN_CLASS'].value_counts(dropna=False)
my_df_temp2.to_csv('../Catalogue/Match07_smalldoubleclean_emlines.csv', index=True)
"""
Explanation: Now we add this information to the full dataset, in case we need it in the future
End of explanation
"""
|
AllenDowney/ModSimPy
|
notebooks/chap21.ipynb
|
mit
|
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
"""
Explanation: Modeling and Simulation in Python
Chapter 21
Copyright 2017 Allen Downey
License: Creative Commons Attribution 4.0 International
End of explanation
"""
m = UNITS.meter
s = UNITS.second
kg = UNITS.kilogram
"""
Explanation: With air resistance
Next we'll add air resistance using the drag equation
I'll start by getting the units we'll need from Pint.
End of explanation
"""
params = Params(height = 381 * m,
v_init = 0 * m / s,
g = 9.8 * m/s**2,
mass = 2.5e-3 * kg,
diameter = 19e-3 * m,
rho = 1.2 * kg/m**3,
v_term = 18 * m / s)
"""
Explanation: Now I'll create a Params object to contain the quantities we need. Using a Params object is convenient for grouping the system parameters in a way that's easy to read (and double-check).
End of explanation
"""
def make_system(params):
"""Makes a System object for the given conditions.
params: Params object
returns: System object
"""
diameter, mass = params.diameter, params.mass
g, rho = params.g, params.rho,
v_init, v_term = params.v_init, params.v_term
height = params.height
area = np.pi * (diameter/2)**2
C_d = 2 * mass * g / (rho * area * v_term**2)
init = State(y=height, v=v_init)
t_end = 30 * s
dt = t_end / 100
return System(params, area=area, C_d=C_d,
init=init, t_end=t_end, dt=dt)
"""
Explanation: Now we can pass the Params object make_system which computes some additional parameters and defines init.
make_system uses the given diameter to compute area and the given v_term to compute the drag coefficient C_d.
End of explanation
"""
system = make_system(params)
"""
Explanation: Let's make a System
End of explanation
"""
def slope_func(state, t, system):
"""Compute derivatives of the state.
state: position, velocity
t: time
system: System object
returns: derivatives of y and v
"""
y, v = state
rho, C_d, g = system.rho, system.C_d, system.g
area, mass = system.area, system.mass
f_drag = rho * v**2 * C_d * area / 2
a_drag = f_drag / mass
dydt = v
dvdt = -g + a_drag
return dydt, dvdt
"""
Explanation: Here's the slope function, including acceleration due to gravity and drag.
End of explanation
"""
slope_func(system.init, 0, system)
"""
Explanation: As always, let's test the slope function with the initial conditions.
End of explanation
"""
def event_func(state, t, system):
"""Return the height of the penny above the sidewalk.
"""
y, v = state
return y
"""
Explanation: We can use the same event function as in the previous chapter.
End of explanation
"""
results, details = run_ode_solver(system, slope_func, events=event_func)
details
"""
Explanation: And then run the simulation.
End of explanation
"""
results.head()
results.tail()
"""
Explanation: Here are the results.
End of explanation
"""
t_sidewalk = get_last_label(results) * s
"""
Explanation: The final height is close to 0, as expected.
Interestingly, the final velocity is not exactly terminal velocity, which suggests that there are some numerical errors.
We can get the flight time from results.
End of explanation
"""
def plot_position(results):
plot(results.y)
decorate(xlabel='Time (s)',
ylabel='Position (m)')
plot_position(results)
savefig('figs/chap21-fig01.pdf')
"""
Explanation: Here's the plot of position as a function of time.
End of explanation
"""
def plot_velocity(results):
plot(results.v, color='C1', label='v')
decorate(xlabel='Time (s)',
ylabel='Velocity (m/s)')
plot_velocity(results)
"""
Explanation: And velocity as a function of time:
End of explanation
"""
# Solution goes here
# Solution goes here
plot_position(results)
# Solution goes here
"""
Explanation: From an initial velocity of 0, the penny accelerates downward until it reaches terminal velocity; after that, velocity is constant.
Exercise: Run the simulation with an initial velocity, downward, that exceeds the penny's terminal velocity. Hint: You can create a new Params object based on an existing one, like this:
params2 = Params(params, v_init=-30 * m/s)
What do you expect to happen? Plot velocity and position as a function of time, and see if they are consistent with your prediction.
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercise: Suppose we drop a quarter from the Empire State Building and find that its flight time is 19.1 seconds. Use this measurement to estimate the terminal velocity.
You can get the relevant dimensions of a quarter from https://en.wikipedia.org/wiki/Quarter_(United_States_coin).
Create a Params object with the system parameters. We don't know v_term, so we'll start with the initial guess v_term = 18 * m / s.
Use make_system to create a System object.
Call run_ode_solver to simulate the system. How does the flight time of the simulation compare to the measurement?
Try a few different values of v_term and see if you can get the simulated flight time close to 19.1 seconds.
Optionally, write an error function and use root_scalar to improve your estimate.
Use your best estimate of v_term to compute C_d.
Note: I fabricated the observed flight time, so don't take the results of this exercise too seriously.
End of explanation
"""
|
robertoalotufo/ia898
|
2S2018/Ex09 Tecnicas de segmentacao.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
"""
Explanation: Ex09 - Segmentation techniques
End of explanation
"""
im2 = mpimg.imread('../data/astablet.tif')
plt.imshow(im2, cmap='gray')
"""
Explanation: Part 1 - Segmenting multiple objects by thresholding
Thresholding can also be applied to images containing more than one object, using multiple thresholds. Try to segment the tablet blister pack in one image and the tablets themselves in another. Use the image histogram to choose the thresholds.
End of explanation
"""
def otsu(f):
    # Otsu's method: choose the threshold t that maximizes the between-class
    # variance of the normalized gray-level histogram.
    n = np.product(np.shape(f))
    h = 1.*np.bincount(f.ravel()) / n            # normalized histogram
    if len(h) == 1:
        return 1, 1
    x = np.arange(np.product(np.shape(h)))       # gray levels
    w0 = np.cumsum(h)                            # class 0 probability up to each level
    w1 = 1 - w0                                  # class 1 probability
    eps = 1e-10
    m0 = np.cumsum(x * h) / (w0 + eps)           # class 0 mean
    mt = m0[-1]                                  # global mean
    m1 = (mt - m0[0:-1]*w0[0:-1]) / w1[0:-1]     # class 1 mean
    sB2 = w0[0:-1] * w1[0:-1] * ((m0[0:-1] - m1)**2)  # between-class variance
    t = np.argmax(sB2)                           # optimal threshold
    v = sB2[t]
    st2 = sum((x-mt)**2 * h)                     # total variance
    eta = v / st2                                # separability measure (0..1)
    return t, eta
"""
Explanation: Part 2 - Demonstration of Otsu thresholding
Try to improve the Otsu thresholding demonstration (Demo).
End of explanation
"""
|
aje/POT
|
notebooks/plot_optim_OTreg.ipynb
|
mit
|
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
"""
Explanation: Regularized OT with generic solver
Illustrates the use of the generic solver for regularized OT with
user-designed regularization term. It uses Conditional gradient as in [6] and
generalized Conditional Gradient as proposed in [5][7].
[5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for
Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine
Intelligence , vol.PP, no.99, pp.1-1.
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport. SIAM Journal on Imaging Sciences,
7(3), 1853-1882.
[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
conditional gradient: analysis of convergence and applications.
arXiv preprint arXiv:1510.06567.
End of explanation
"""
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
b = ot.datasets.get_1D_gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
"""
Explanation: Generate data
End of explanation
"""
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
"""
Explanation: Solve EMD
End of explanation
"""
#%% Example with Frobenius norm regularization
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg = 1e-1
Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(3)
ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg')
"""
Explanation: Solve EMD with Frobenius norm regularization
End of explanation
"""
#%% Example with entropic regularization
def f(G):
return np.sum(G * np.log(G))
def df(G):
return np.log(G) + 1.
reg = 1e-3
Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg')
"""
Explanation: Solve EMD with entropic regularization
End of explanation
"""
#%% Example with Frobenius norm + entropic regularization with gcg
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg1 = 1e-3
reg2 = 1e-1
Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True)
pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg')
pl.show()
"""
Explanation: Solve EMD with Frobenius norm + entropic regularization
End of explanation
"""
|
MPBA/pyHRV
|
tutorials/4-misc.ipynb
|
gpl-3.0
|
# import libraries
from __future__ import division
import numpy as np
import os
import matplotlib.pyplot as plt
from pyphysio.tests import TestData
%matplotlib inline
# import all pyphysio classes and methods
import pyphysio as ph
# import data and creating a signal
ecg_data = TestData.ecg()
fsamp = 2048
ecg = ph.EvenlySignal(values = ecg_data, sampling_freq = fsamp, signal_type = 'ecg')
# Step 1: Filtering and preprocessing
# (optional) IIR filtering : remove high frequency noise
ecg = ph.IIRFilter(fp=45, fs = 50, ftype='ellip')(ecg)
# normalization : normalize data
ecg = ph.Normalize(norm_method='standard')(ecg)
# resampling : increase the sampling frequency by cubic interpolation
ecg = ecg.resample(fout=4096, kind='cubic')
fsamp = 4096
# Step 2: Information Extraction
ibi = ph.BeatFromECG()(ecg)
# (optional) edit IBI
# ibi_ok = ph.Annotate(ecg, ibi)()
# Step 3: Computation of physiological indicators
# create fake label
label = np.zeros(1200)
label[300:600] = 1
label[900:1200] = 2
label = ph.EvenlySignal(label, sampling_freq = 10, signal_type = 'label')
# define a list of indicators we want to compute
hrv_indicators = [ph.Mean(name='RRmean'), ph.StDev(name='RRstd'), ph.RMSSD(name='rmsSD')]
"""
Explanation: This is a tutorial for Python 2.7
pyphysio library
4. Miscellaneous
In this tutorial we consider other functions and techniques for the advanced usage of pyphysio.
In particular we present the following topics
1. More about segmentation: custom segmentation and label management
4.1 More about segmentation
We reproduce here the main steps for the processing of an ECG signal
End of explanation
"""
t_start = [0.5, 15, 98.7]
t_stop = [5, 21, 110.4]
"""
Explanation: 4.1.1 Creation of custom segments
To create custom segments we need to define the start and stop instants of each segment:
End of explanation
"""
#custom windows
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop)
"""
Explanation: And then use the function CustomSegments to use the defined instants for the segmentation:
End of explanation
"""
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators)
"""
Explanation: Then the processing can be continued as usual:
End of explanation
"""
#custom windows
label_segments = ph.LabelSegments(labels=label)
"""
Explanation: Note that we obtained three rows, corresponding to the three custom segments we defined above.
4.1.2 Creation of segments using the information about the experiment sessions
We can use the information about the experiment sessions to automatically segment the signal in order to have a unique segment for each session:
End of explanation
"""
indicators, col_names = ph.fmap(label_segments, hrv_indicators, ibi)
print(indicators[:, :4])
"""
Explanation: Then the processing can be continued as usual:
End of explanation
"""
t_start = [0.5, 15, 78.7]
t_stop = [5, 21, 110.4]
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label)
"""
Explanation: Note that we obtained four rows, corresponding to the four different sessions of the experiment.
4.1.3 Management of the labels
In case we want to keep track of the portion of the experiment each segment belongs to, we should pass the information about the experiment sessions to the segmentation function (as seen in 2.1 and 2.2, Step 3).
For instance, in case of a custom segmentation:
End of explanation
"""
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
"""
Explanation: Therefore, the third column of the matrix obtained from the computation of the indicators will contain the information about the session the segment was taken from.
End of explanation
"""
plt.figure()
label.plot() #plot the label signal
plt.vlines(t_start, 0, 2, 'g') #plot the start of the segments, green vertical lines
plt.vlines(t_stop, 0, 2, 'r') #plot the end of the segments, red vertical lines
"""
Explanation: Note that we obtain only two segments and we miss the last one.
This is because, according to the information about the experiment sessions, the third segment belongs to two different sessions:
End of explanation
"""
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 110.4]
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 130.4] # endo of the last segments has been changed: 110.4 --> 130.40
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
# drop_cut = True (default) --> drop the segments in which the signal ends before the end of the segment
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
t_start = [0.5, 15, 78.7]
t_stop = [10, 21, 130.4] # endo of the last segments has been changed: 110.4 --> 130.40
# drop_mixed = False --> keep also the segments belonging to different experiment sessions
# drop_cut = False --> keep also the segments in which the signal ends before the end of the segment
custom_segments = ph.CustomSegments(begins = t_start, ends = t_stop, labels = label, drop_mixed=False, drop_cut=False)
indicators, col_names = ph.fmap(custom_segments, hrv_indicators, ibi)
print(indicators[:, :3])
"""
Explanation: In this special case the function cannot assign the window to a specific experimental session.
In addition, a signal might end before the end of a segment (e.g. due to disconnection).
There are two parameters that can be used in a segmentation function to handle these special cases:
* drop_mixed: whether to drop the segments that present mixed values of the labels. It is True by default;
* drop_cut: whether to drop the segments in which the segmented signal ends before the end of the segment. It is True by default.
Some examples below:
End of explanation
"""
from os.path import expanduser
home = expanduser("~") # data will be saved in the user's home directory
print(home)
"""
Explanation: 4.2 Use pickle to save and load signals
You may find it useful to store the intermediate results of the signal processing procedure.
pyphysio provides the to_csv(FILENAME) method to save a signal as a .csv file:
End of explanation
"""
#ibi.to_csv(home+'/IBI.csv')
"""
Explanation: The following line saves the IBI signal into a csv file.
To avoid the risk of overwriting existing data the line has been commented.
Uncomment to execute
End of explanation
"""
#ibi.to_pickle(home+'/IBI.pkl')
#ibi2 = ph.from_pickle('IBI.pkl')
#ax1 = plt.subplot(211)
#ibi.plot()
#plt.ylabel('ibi')
#plt.subplot(212, sharex=ax1)
#ibi2.plot()
#plt.ylabel('ibi2')
"""
Explanation: However, loading the csv file back into a Signal to continue the processing is not straightforward.
It is therefore suggested, especially when the signals will not be analysed with other software (e.g. R, spreadsheets, etc.), to use:
* to_pickle(FILENAME) function to save the signal in a binary-compressed file;
* from_pickle(FILENAME) to load the saved file back into another script.
The following lines save the IBI signal into a pickleable file and load it into a new signal ("ibi2").
To avoid the risk of overwriting existing data the lines have been commented.
Uncomment to execute
End of explanation
"""
|
endangeredoxen/pywebify
|
pywebify/tests/webpages.ipynb
|
gpl-2.0
|
%load_ext autoreload
%autoreload 2
import os, sys
path = os.path.abspath('../..'); sys.path.insert(0, path) if path not in sys.path else None
from IPython.display import HTML
from pywebify import webpage
Page = webpage.Webpage
"""
Explanation: webpages module
author: kevin.tetz
description: webpages module tests
End of explanation
"""
Page(blah=True)
"""
Explanation: field setting
invalid attribute
raises AttributeError
End of explanation
"""
page = Page(relpaths=['spam', 'eggs'])
page.pagefilename = 'newwebpage'
print(page.pagename)
"""
Explanation: set pagefilename
the filename concatenates the basepath, relpaths, pagefilename, and pagefileext into the pagename when set
End of explanation
"""
wp = Page(relpaths=['foo', 'bar', 'baz'])
wp.figfilename = 'newfigure'
print(wp.figname)
"""
Explanation: set figfilename
similarly the figfilename concatenates the basepath, relpaths, figfilename, and figfileext into the figname when set
End of explanation
"""
HTML(Page().content('<hr><h1>Hello World!</h1><hr>').render().pagename)
os.listdir(r'..\img')
HTML(Page().content('<hr><h1>Hello Image!</h1><hr>').imglink(r'..\img\favicon.png').render().pagename)
"""
Explanation: render
basic template
End of explanation
"""
page = Page()
page.pagefilename = 'tabs1'
page.pagelink().tabsinit('tab0').content('tab0 content!')
page.tabsnext('tab1').content('tab1 content?').tabsnext('tab2').content('hello tab 2!@?!')
HTML(page.tabsend().render().pagename)
"""
Explanation: tabs
End of explanation
"""
|
tpin3694/tpin3694.github.io
|
machine-learning/find_maximum_and_minimum.ipynb
|
mit
|
# Load library
import numpy as np
"""
Explanation: Title: Find The Maximum And Minimum
Slug: find_maximum_and_minimum
Summary: How to find the maximum, minimum, and average of the elements in an array.
Date: 2017-09-03 12:00
Category: Machine Learning
Tags: Vectors Matrices Arrays
Authors: Chris Albon
Preliminaries
End of explanation
"""
# Create matrix
matrix = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
"""
Explanation: Create Matrix
End of explanation
"""
# Return maximum element
np.max(matrix)
"""
Explanation: Find Maximum Element
End of explanation
"""
# Return minimum element
np.min(matrix)
"""
Explanation: Find Minimum Element
End of explanation
"""
# Find the maximum element in each column
np.max(matrix, axis=0)
"""
Explanation: Find Maximum Element By Column
End of explanation
"""
# Find the maximum element in each row
np.max(matrix, axis=1)
"""
Explanation: Find Maximum Element By Row
End of explanation
"""
|
dbouquin/IS_608
|
608_HW4/IS608_HW4.ipynb
|
mit
|
# Import modules for analysis and visualization
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
import numpy as np
from numpy import genfromtxt
import os.path
from datetime import datetime
# Clean up and import (pandas)
data_url = "https://raw.githubusercontent.com/jlaurito/CUNY_IS608/master/lecture4/data/riverkeeper_data_2013.csv"
raw_data = pd.read_csv(data_url)
# info like what you get with str() in R
raw_data.dtypes
# look at the first 25 rows
# remember the first row is the colnames
raw_data.head(25)
# Adjust the data types to make the data easier to work with
raw_data["Site"] = raw_data["Site"].astype('category') # like facets
raw_data["Date"] = pd.to_datetime(raw_data["Date"]) # dates as dates
# We need to remove the greater than and less than symbols and treat "EnteroCount" as an integer -- 'int64'
raw_data["EnteroCount"] = raw_data["EnteroCount"].str.lstrip('><')
raw_data["EnteroCount"] = raw_data["EnteroCount"].astype('int64')
# Check to make sure the changes are correct
raw_data.dtypes
raw_data.head(20) # there are no more < or > symbols
"""
Explanation: <h3>IS 608 HW 4</h3>
Using the dataset available here complete the following:
1. Create lists & graphs of the best and worst places to swim in the dataset.
2. The testing of water quality can be sporadic. Which sites have been tested most regularly? Which ones have long gaps between tests? Pick out 5-10 sites and visually compare how regularly their water quality is tested.
3. Is there a relationship between the amount of rain and water quality? Show this relationship graphically. If you can, estimate the effect of rain on quality at different sites and create a visualization to compare them.
Background: The United States Environmental Protection Agency (EPA) reports Entero counts as colonies (or cells) per 100 ml of water. The federal standard for unacceptable water quality is a single sample value of greater than 110 Enterococcus/100mL, or five or more samples with a geometric mean (a weighted average) greater than 30 Enterococcus/100mL.
End of explanation
"""
# Create a column showing whether or not the water quality is acceptable in a given place
# unnaceptable:
# 110 Enterococcus/100mL OR
# five or more samples with a geometric mean (a weighted average) > 30 Enterococcus/100mL.
# The prompt is really vague in defining the geometric mean condition - I used gmean()
# When calculated, geometric mean by site of highest 5 samples and single sample >= 110
# shows nearly all sites would have water with unacceptable water quality at some point
raw_data['swim'] = np.where(
(raw_data.groupby('Site').EnteroCount.transform(max) > 110)
| (raw_data.groupby('Site').EnteroCount.transform(lambda group: gmean(group.nlargest(5))) > 30),
'unacceptable', 'acceptable')
raw_data.head(10)
# As a result of above criteria not being very useful, I instead grouped the data by site and
# calculated the mean entero_count for each site
mean_entero = raw_data.groupby(['Site'])['EnteroCount'].mean().sort_values()
# Top ten best places to swim
print "Ten best places to swim"
print "+++++++++++++++++++++++"
mean_entero.head(10)
# Ten worst places to swim
print "Ten worst places to swim"
print "++++++++++++++++++++++++"
mean_entero.tail(10).sort_values(ascending=False)
# write the results to a csv and read in for more flexible use (now and later)
mean_entero.to_csv( 'mean_entero.csv' )
mean_entero1 = pd.read_csv('mean_entero.csv')
mean_entero1.columns = ['Site','Average_Entero_Count']
mean_entero1.head() #make sure it looks right
# Plot the results
%matplotlib inline
sns.set_style("whitegrid")
sns.set_style("ticks")
plt.figure(figsize=(10, 20))
ax = sns.barplot(x ="Average_Entero_Count", y="Site", data=mean_entero1, palette="Blues_d")
ax.set(xlabel='Average Entero Count', ylabel='Site')
ax.set_title('Waterways ranked by Average Entero Count')
sns.despine()
# Don't go swimming in the Gowanus Canal
"""
Explanation: Create lists & graphs of the best and worst places to swim in the dataset.
End of explanation
"""
# Are we looking for consistency or frequency in testing?
# Note to self - learn more about plotting time series data
# Determine the most recent reading for each site (might be useful?)
maxDate = raw_data.groupby(by=["Site"])["Date"].max()
maxDate.sort_values(ascending=False, inplace=True)
# what are our most FREQUENTLY tested Sites?
test_Site_counts = raw_data.groupby(by=["Site"])["Date"].count()
test_Site_counts.sort_values(ascending=False, inplace=True)
# Print out results
print "Most recently tested site dates"
print "++++++++++++++++++++++++"
print maxDate.head(15)
print "\n"
print "Most frequently tested sites"
print "++++++++++++++++++++++++"
print test_Site_counts.head(15)
# Consistency is more important
# Figure out how many days elapsed between readings and calculate average by group.
# This gives us a general sense of the regularity of testing
# After lots of cursing and fiddling!
lag_date_data = raw_data.groupby('Site')['Date'].apply(lambda x: x.diff().mean()).reset_index().rename(columns={'Date':'Mean_lag_days'})
lag_date_data.head()
# write it to a csv to make it more flexible
lag_date_data.to_csv( 'lag_data.csv' )
# Clean up the csv for plotting
lag_data = pd.read_csv('lag_data.csv')
lag_data.columns = ['index','Site','mean_test_lag']
lag_data_num_col = pd.DataFrame(lag_data['mean_test_lag'].str.split().tolist(), columns=['mean_lag_days','junk','more_junk'])
mean_lag_data = pd.concat([lag_data, lag_data_num_col], axis=1)
del mean_lag_data['index']
del mean_lag_data['mean_test_lag']
del mean_lag_data['junk']
del mean_lag_data['more_junk']
# convert mean column to numeric type
mean_lag_data["mean_lag_days"] = pd.to_numeric(mean_lag_data["mean_lag_days"])
# apply absolute value to mean column
mean_lag_data['mean_lag_days'] = mean_lag_data['mean_lag_days'].abs()
mean_lag_data.head(10)
print mean_lag_data.dtypes
# Get a general sense of the spread - on average how often are the sites tested
%matplotlib inline
plt.figure(figsize=(4.5, 10))
sns.violinplot(mean_lag_data, palette="colorblind")
sns.despine()
# Subset random sample of sites to compare and plot
# Random subset of 10% of the data
sub_lag_data = mean_lag_data.sample(frac=0.1, replace=True)
%matplotlib inline
plt.figure(figsize=(10, 8))
ax = sns.barplot(x ="mean_lag_days", y="Site", data=sub_lag_data, palette="Blues_d")
ax.set(xlabel='Mean Lag Time', ylabel='Site')
ax.set_title('Mean Lag Time Between Tests in Days')
sns.despine()
# From the plot we can see there is a huge range in average lag between tests!
"""
Explanation: The testing of water quality can be sporadic. Which sites have been tested most regularly? Which ones have long gaps between tests? Pick out 5-10 sites and visually compare how regularly their water quality is tested.
End of explanation
"""
# Cleanup so we're using just the entero count and rain totals
del raw_data['Site']
del raw_data['Date']
del raw_data['SampleCount']
raw_data.head()
%matplotlib inline
# Scatterplot to show relationship between rainfall and Entero Count
plt.figure(figsize=(7.5, 6.5))
sns.regplot('FourDayRainTotal', 'EnteroCount', data=raw_data, fit_reg=False, x_jitter=1)
sns.despine()
sns.plt.ylim(0)
sns.plt.xlim(0)
# There seems to be a relatively strong relationship between rainfall and water quality once
# you go beyond ~3 inches of rain. At that point there are much fewer high Entero Count readings
# Compare any two locations with the following
# re-read in/clean up
data_url = "https://raw.githubusercontent.com/jlaurito/CUNY_IS608/master/lecture4/data/riverkeeper_data_2013.csv"
site_rain_data = pd.read_csv(data_url)
site_rain_data["Site"] = site_rain_data["Site"].astype('category') # like facets
site_rain_data["Date"] = pd.to_datetime(site_rain_data["Date"]) # dates as dates
site_rain_data["EnteroCount"] = site_rain_data["EnteroCount"].str.lstrip('><')
site_rain_data["EnteroCount"] = site_rain_data["EnteroCount"].astype('int64')
site_rain_data.head()
"""
Explanation: Is there a relationship between the amount of rain and water quality? Show this relationship graphically. If you can, estimate the effect of rain on quality at different sites and create a visualization to compare them.
End of explanation
"""
site1 = raw_input('enter your first site --> ') # Gowanus Canal <- worst
site2 = raw_input('enter your second site --> ') # Croton Point Beach <- best water
# Gowanus is dirtiest, Croton Point Beach is cleanest
x = site_rain_data.loc[site_rain_data['Site'] == site1]
y = site_rain_data.loc[site_rain_data['Site'] == site2]
frames = [x, y]
result = pd.concat(frames)
del result['Date']
del result['SampleCount']
result.head()
plt.figure(figsize=(10, 10))
sns.lmplot('FourDayRainTotal', 'EnteroCount', data=result, hue='Site', palette="Set2", fit_reg=False, scatter_kws={"s": 50}, legend=False)
plt.legend(loc='upper right')
sns.despine()
sns.plt.ylim(0)
sns.plt.xlim(0)
"""
Explanation: Enter some sites into the search boxes
End of explanation
"""
|
alsam/Claw.jl
|
src/euler/Euler_approximate.ipynb
|
mit
|
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import numpy as np
from exact_solvers import euler
from utils import riemann_tools as rt
from ipywidgets import interact
from ipywidgets import widgets
State = euler.Primitive_State
def roe_averages(q_l, q_r, gamma=1.4):
rho_sqrt_l = np.sqrt(q_l[0])
rho_sqrt_r = np.sqrt(q_r[0])
p_l = (gamma-1.)*(q_l[2]-0.5*(q_l[1]**2)/q_l[0])
p_r = (gamma-1.)*(q_r[2]-0.5*(q_r[1]**2)/q_r[0])
denom = rho_sqrt_l + rho_sqrt_r
u_hat = (q_l[1]/rho_sqrt_l + q_r[1]/rho_sqrt_r)/denom
H_hat = ((q_l[2]+p_l)/rho_sqrt_l + (q_r[2]+p_r)/rho_sqrt_r)/denom
c_hat = np.sqrt((gamma-1)*(H_hat-0.5*u_hat**2))
return u_hat, c_hat, H_hat
def Euler_roe(q_l, q_r, gamma=1.4):
"""
Approximate Roe solver for the Euler equations.
"""
rho_l = q_l[0]
rhou_l = q_l[1]
u_l = rhou_l/rho_l
rho_r = q_r[0]
rhou_r = q_r[1]
u_r = rhou_r/rho_r
u_hat, c_hat, H_hat = roe_averages(q_l, q_r, gamma)
dq = q_r - q_l
s1 = u_hat - c_hat
s2 = u_hat
s3 = u_hat + c_hat
alpha2 = (gamma-1.)/c_hat**2 *((H_hat-u_hat**2)*dq[0]+u_hat*dq[1]-dq[2])
alpha3 = (dq[1] + (c_hat - u_hat)*dq[0] - c_hat*alpha2) / (2.*c_hat)
alpha1 = dq[0] - alpha2 - alpha3
r1 = np.array([1., u_hat-c_hat, H_hat - u_hat*c_hat])
r2 = np.array([1., u_hat, 0.5*u_hat**2])
q_l_star = q_l + alpha1*r1
q_r_star = q_l_star + alpha2*r2
states = np.column_stack([q_l,q_l_star,q_r_star,q_r])
speeds = [s1, s2, s3]
wave_types = ['contact','contact', 'contact']
def reval(xi):
rho = (xi<s1)*states[0,0] + (s1<=xi)*(xi<s2)*states[0,1] + \
(s2<=xi)*(xi<s3)*states[0,2] + (s3<=xi)*states[0,3]
mom = (xi<s1)*states[1,0] + (s1<=xi)*(xi<s2)*states[1,1] + \
(s2<=xi)*(xi<s3)*states[1,2] + (s3<=xi)*states[1,3]
E = (xi<s1)*states[2,0] + (s1<=xi)*(xi<s2)*states[2,1] + \
(s2<=xi)*(xi<s3)*states[2,2] + (s3<=xi)*states[2,3]
return rho, mom, E
return states, speeds, reval, wave_types
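# Usage sketch (an added illustration, not from the original notebook): a Sod-like Riemann
# problem posed directly in conserved variables (rho, rho*u, E); with u = 0, E = p/(gamma-1).
gamma = 1.4
q_left = np.array([3.0, 0.0, 3.0 / (gamma - 1.)])   # rho = 3, u = 0, p = 3
q_right = np.array([1.0, 0.0, 1.0 / (gamma - 1.)])  # rho = 1, u = 0, p = 1
states, speeds, reval, wave_types = Euler_roe(q_left, q_right, gamma)
print(states)
print(speeds)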
"""
Explanation: Approximate solvers for the Euler equations of gas dynamics
In this chapter we discuss approximate solvers for the one-dimensional Euler equations:
\begin{align}
\rho_t + (\rho u)_x & = 0 \\
(\rho u)_t + (\rho u^2 + p)_x & = 0 \\
E_t + ((E+p)u)_x & = 0.
\end{align}
As in Euler, we focus on the case of an ideal gas, for which the total energy is given by
\begin{align} \label{EA:EOS}
E = \frac{p}{\gamma-1} + \frac{1}{2}\rho u^2.
\end{align}
To examine the Python code for this chapter, and for the exact Riemann solution, see:
exact_solvers/euler.py ...
on github.
Roe solver
We first derive a Roe solver for the Euler equations, following the same approach as in Shallow_water_approximate. Namely, we assume that $\hat{A} = f'(\hat{q})$ for some average state $\hat{q}$, and impose the condition of conservation:
\begin{align} \label{EA:cons}
f'(\hat{q}) (q_r - q_\ell) & = f(q_r) - f(q_\ell).
\end{align}
We will need the following quantities:
\begin{align}
q & = \begin{pmatrix} \rho \\ \rho u \\ E \end{pmatrix}, \ \ \ \ \ \ f(q) = \begin{pmatrix} \rho u \\ \rho u^2 + p \\ H u \rho \end{pmatrix}, \\
f'(\hat{q}) & = \begin{pmatrix}
0 & 1 & 0 \\
\frac{\gamma-3}{2}\hat{u}^2 & (3-\gamma)\hat{u} & \gamma-1 \\
\frac{\gamma-1}{2}\hat{u}^3 - \hat{u}\hat{H} & \hat{H} - (\gamma-1)\hat{u}^2 & \gamma \hat{u} \end{pmatrix}.
\end{align}
Here $H = \frac{E+p}{\rho}$ is the enthalpy. We have rewritten most expressions involving $E$ in terms of $H$ because it simplifies the derivation that follows. We now solve (\ref{EA:cons}) to find $\hat{u}$ and $\hat{H}$. It turns out that, for the case of a polytropic ideal gas, the average density $\hat{\rho}$ plays no role in the Roe solver.
The first equation of (\ref{EA:cons}) is an identity, satisfied independently of our choice of $\hat{q}$. The second equation is (using (\ref{EA:EOS}))
\begin{align}
\frac{\gamma-3}{2}\hat{u}^2 (\rho_r - \rho_\ell) + (3-\gamma)\hat{u}(\rho_r u_r - \rho_\ell u_\ell) \ + (\gamma-1)\left( \frac{p_r-p_\ell}{\gamma-1} + \frac{1}{2}(\rho_r u_r^2 - \rho_\ell u_\ell^2) \right) & = \rho_r u_r^2 - \rho_\ell u_\ell^2 + p_r - p_\ell,
\end{align}
which simplifies to a quadratic equation for $\hat{u}$:
\begin{align} \label{EA:u_quadratic}
(\rho_r - \rho_\ell)\hat{u}^2 - 2(\rho_r u_r - \rho_\ell u_\ell) \hat{u} + (\rho_r u_r^2 - \rho_\ell u_\ell^2) & = 0,
\end{align}
with roots
\begin{align}
\hat{u}_\pm & = \frac{\rho_r u_r - \rho_\ell u_\ell \mp \sqrt{\rho_r \rho_\ell} (u_r - u_\ell)}{\rho_r - \rho_\ell} = \frac{\sqrt{\rho_r} u_r \pm \sqrt{\rho_\ell} u_\ell}{\sqrt{\rho_r}\pm\sqrt{\rho_\ell}}
\end{align}
Notice that this is identical to the Roe average of the velocity for the shallow water equations, if we replace the density $\rho$ with depth $h$. As before, we choose the root $u_+$ since it is well defined for all values of $\rho_r, \rho_\ell$.
Next we find $\hat{H}$ by solving the last equation of (\ref{EA:cons}), which reads
\begin{align}
\left( \frac{\gamma-1}{2}\hat{u}^3 - \hat{u}\hat{H} \right)(\rho_r - \rho_\ell) \ + \left( \hat{H} - (\gamma-1)\hat{u}^2 \right)(\rho_r u_r - \rho_\ell u_\ell) + \gamma \hat{u}(E_r - E_\ell) & = H_r u_r \rho_r - H_\ell u_\ell \rho_\ell.
\end{align}
We can simplify this using the equality $\gamma E = \rho H + \frac{\gamma-1}{2}\rho u^2$ and solve for $\hat{H}$ to find
\begin{align}
\hat{H}_{\pm} & = \frac{\rho_r H_r (u_r - \hat{u}_\pm) - \rho_\ell H_\ell (u_\ell - \hat{u}_\pm)}{\rho_r u_r - \rho_\ell u_\ell - \hat{u}_\pm(\rho_r -\rho_\ell)} \\
& = \frac{\rho_r H_r (u_r - \hat{u}_\pm) - \rho_\ell H_\ell (u_\ell - \hat{u}_\pm)}{\pm\sqrt{\rho_r \rho_\ell}(u_r-u_\ell)} \\
& = \frac{\rho_r H_r - \rho_\ell H_\ell \mp\sqrt{\rho_r \rho_\ell}(H_r - H_\ell)}{\rho_r - \rho_\ell} \\
& = \frac{\sqrt{\rho_r}H_r \pm \sqrt{\rho_\ell} H_\ell}{\sqrt{\rho_r}\pm\sqrt{\rho_\ell}}.
\end{align}
Once more, we take the plus sign in the final expression for $\hat{H}$, giving the Roe averages
$$
\hat{u} = \frac{\sqrt{\rho_r} u_r + \sqrt{\rho_\ell} u_\ell}{\sqrt{\rho_r} + \sqrt{\rho_\ell}},
\qquad \hat{H} = \frac{\sqrt{\rho_r}H_r + \sqrt{\rho_\ell} H_\ell}{\sqrt{\rho_r} + \sqrt{\rho_\ell}}.
$$
To implement the Roe solver, we also need the eigenvalues and eigenvectors of the averaged flux Jacobian $f'(\hat{q})$. These are just the eigenvalues of the true Jacobian, evaluated at the averaged state:
\begin{align}
\lambda_1 & = \hat{u} - \hat{c}, & \lambda_2 & = \hat{u} & \lambda_3 & = \hat{u} + \hat{c},
\end{align}
\begin{align}
r_1 & = \begin{bmatrix} 1 \\ \hat{u}-\hat{c} \\ \hat{H}-\hat{u}\hat{c}\end{bmatrix} &
r_2 & = \begin{bmatrix} 1 \\ \hat{u} \\ \frac{1}{2}\hat{u}^2 \end{bmatrix} &
r_3 & = \begin{bmatrix} 1 \\ \hat{u}+\hat{c} \\ \hat{H}+\hat{u}\hat{c}\end{bmatrix}.
\end{align}
Here $\hat{c} = \sqrt{(\gamma-1)(\hat{H}-\hat{u}^2/2)}$.
Solving the system of equations
\begin{align}
q_r - q_\ell & = \sum_{p=1}^3 {\mathcal W}_p = \sum_{p=1}^3 \alpha_p r_p
\end{align}
for the wave strengths gives
\begin{align}
\alpha_2 & = \frac{\gamma-1}{\hat{c}^2}\left((\hat{H}-\hat{u}^2)\delta_1 + \hat{u}\delta_2 - \delta_3\right) \\
\alpha_3 & = \frac{\delta_2 + (\hat{c}-\hat{u})\delta_1 - \hat{c}\alpha_2}{2\hat{c}} \
\alpha_1 & = \delta_1 - \alpha_2 - \alpha_3,
\end{align}
where $\delta = q_r - q_\ell$. We now have everything we need to implement the Roe solver.
End of explanation
"""
def compare_solutions(left_state, right_state, solvers=['Exact','HLLE']):
q_l = np.array(euler.primitive_to_conservative(*left_state))
q_r = np.array(euler.primitive_to_conservative(*right_state))
outputs = []
states = {}
for solver in solvers:
if solver.lower() == 'exact':
outputs.append(euler.exact_riemann_solution(q_l,q_r))
if solver.lower() == 'hlle':
outputs.append(Euler_hlle(q_l, q_r))
states['hlle'] = outputs[-1][0]
if solver.lower() == 'roe':
outputs.append(Euler_roe(q_l, q_r))
states['roe'] = outputs[-1][0]
plot_function = \
rt.make_plot_function([val[0] for val in outputs],
[val[1] for val in outputs],
[val[2] for val in outputs],
[val[3] for val in outputs],
solvers, layout='vertical',
variable_names=euler.primitive_variables,
derived_variables=euler.cons_to_prim,
vertical_spacing=0.15,
show_time_legend=True)
interact(plot_function,
t=widgets.FloatSlider(min=0,max=0.9,step=0.1,value=0.4));
return states
left = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
"""
Explanation: An implementation of this solver for use in Clawpack can be found here. Recall that an exact Riemann solver for the Euler equations appears in exact_solvers/euler.py.
Examples
Let's compare the Roe approximation to the exact solution. As a first example, we use the Sod shock tube.
End of explanation
"""
left = State(Density = 0.1,
Velocity = 0.,
Pressure = 0.1)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
"""
Explanation: Recall that in the true solution the middle wave is a contact discontinuity and carries only a jump in the density. For that reason the three-dimensional phase space plot is generally shown projected onto the pressure-velocity plane as shown above: The two intermediate states in the true solution have the same pressure and velocity, and so are denoted by a single Middle state in the phase plane plot.
The Roe solver, on the other hand, generates a middle wave that carries a jump in all 3 variables and there are two green dots appearing in the plot above for the two middle states (though the pressure jump is quite small in this example). For a Riemann problem like this one with zero initial velocity on both sides, the Roe average velocity must also be zero, so the middle wave is stationary; this is of course not typically true in the exact solution, even when $u_\ell=u_r=0$.
Here is a second example. Experiment with the initial states to explore how the Roe solution compares to the exact solution.
End of explanation
"""
M = 2. # Mach number of the shock wave
gamma = 1.4
mu = 2*(M**2-1)/(M*(gamma+1.))
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
c_r = np.sqrt(gamma*right.Pressure/right.Density)
rho_l = right.Density * M/(M-mu)
p_l = right.Pressure * ((2*M**2-1)*gamma+1)/(gamma+1)
u_l = mu*c_r
left = State(Density = rho_l,
Velocity = u_l,
Pressure = p_l)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
euler.phase_plane_plot(left, right, approx_states=states['roe'])
"""
Explanation: Single-shock solution
Next we demonstrate the exactness property of the Roe solver by applying it to a case where the left and right states are connected by a single shock wave.
End of explanation
"""
left = State(Density = 0.1,
Velocity = -2.,
Pressure = 0.1)
right = State(Density = 1.,
Velocity = -1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','Roe'])
"""
Explanation: It is evident that the solution consists of a single right-going shock. The exact solution cannot be seen because it coincides exactly with the Roe solution. The path of the shock in the first plot also cannot be seen since it is plotted under the path of the rightmost Roe solution wave. The two solutions differ only in the wave speeds predicted for the other two waves, but since these waves have zero strength this makes no difference.
Transonic rarefactions and an entropy fix
Here is an example of a Riemann problem whose solution includes a transonic 2-rarefaction:
End of explanation
"""
def Euler_hlle(q_l, q_r, gamma=1.4):
"""HLLE approximate solver for the Euler equations."""
rho_l = q_l[0]
rhou_l = q_l[1]
u_l = rhou_l/rho_l
rho_r = q_r[0]
rhou_r = q_r[1]
u_r = rhou_r/rho_r
E_r = q_r[2]
E_l = q_l[2]
u_hat, c_hat, H_hat = roe_averages(q_l, q_r, gamma)
p_r = (gamma-1.) * (E_r - rho_r*u_r**2/2.)
p_l = (gamma-1.) * (E_l - rho_l*u_l**2/2.)
H_r = (E_r+p_r) / rho_r
H_l = (E_l+p_l) / rho_l
c_r = np.sqrt((gamma-1.)*(H_r-u_r**2/2.))
c_l = np.sqrt((gamma-1.)*(H_l-u_l**2/2.))
s1 = min(u_l-c_l,u_hat-c_hat)
s2 = max(u_r+c_r,u_hat+c_hat)
rho_m = (rhou_r - rhou_l - s2*rho_r + s1*rho_l)/(s1-s2)
rhou_m = (rho_r*u_r**2 - rho_l*u_l**2 \
+ p_r - p_l - s2*rhou_r + s1*rhou_l)/(s1-s2)
E_m = ( u_r*(E_r+p_r) - u_l*(E_l+p_l) - s2*E_r + s1*E_l)/(s1-s2)
q_m = np.array([rho_m, rhou_m, E_m])
states = np.column_stack([q_l,q_m,q_r])
speeds = [s1, s2]
wave_types = ['contact','contact']
def reval(xi):
rho = (xi<s1)*rho_l + (s1<=xi)*(xi<=s2)*rho_m + (s2<xi)*rho_r
mom = (xi<s1)*rhou_l + (s1<=xi)*(xi<=s2)*rhou_m + (s2<xi)*rhou_r
E = (xi<s1)*E_l + (s1<=xi)*(xi<=s2)*E_m + (s2<xi)*E_r
return rho, mom, E
return states, speeds, reval, wave_types
"""
Explanation: Notice that in the exact solution, the right edge of the rarefaction travels to the right. In the Roe solution, all waves travel to the left. As in the case of the shallow water equations, here too this behavior can lead to unphysical solutions when this approximate solver is used in a numerical discretization. In order to correct this, we can split the single wave into two when a transonic rarefaction is present, in a way similar to what is done in the shallow water equations. We do not go into details here.
HLLE Solver
Recall that an HLL solver uses only two waves with a constant state between them. The Euler equations are our first example for which the number of waves in the true solution is larger than the number of waves in the approximate solution. As one might expect, this leads to noticeable inaccuracy in solutions produced by the solver.
Again following Einfeldt, the left-going wave speed is chosen to be the minimum of the Roe speed for the 1-wave and the characteristic speed $\lambda^1$ in the left state $q_\ell$. The right-going wave speed is chosen to be the maximum of the Roe speed for the 3-wave and the characteristic speed $\lambda^3$ in the right state $q_r$. Effectively, this means that
\begin{align}
s_1 & = \min(u_\ell - c_\ell, \hat{u}-\hat{c}) \
s_2 & = \max(u_r + c_r, \hat{u}+\hat{c})
\end{align}
Recall that once we have chosen these two wave speeds, conservation dictates the value of the intermediate state:
\begin{align} \label{SWA:hll_middle_state}
q_m = \frac{f(q_r) - f(q_\ell) - s_2 q_r + s_1 q_\ell}{s_1 - s_2}.
\end{align}
End of explanation
"""
left = State(Density = 3.,
Velocity = 0.,
Pressure = 3.)
right = State(Density = 1.,
Velocity = 0.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact','HLLE'])
euler.phase_plane_plot(left, right, approx_states=states['hlle'])
"""
Explanation: Examples
End of explanation
"""
left = State(Density = 1.,
Velocity = -5.,
Pressure = 1.)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact', 'Roe'])
"""
Explanation: Preservation of positivity
Just as we saw in the case of the shallow water equations, the Roe solver (or any linearized solver) for the Euler equations fails to preserve positivity of the pressure and/or density in some situations. Here is one example.
End of explanation
"""
left = State(Density = 1.,
Velocity = -10.,
Pressure = 1.)
right = State(Density = 1.,
Velocity = 1.,
Pressure = 1.)
states = compare_solutions(left, right, solvers=['Exact', 'HLLE']);
euler.phase_plane_plot(left,right,approx_states=states['hlle'])
"""
Explanation: As we can see, in this example each Roe solver wave moves much more slowly than the leading edge of the corresponding true rarefaction. In order to maintain conservation, this implies that the middle Roe state must have lower density than the true middle state. This leads to a negative density. Note that the velocity and pressure take huge values in the intermediate state.
The HLLE solver, on the other hand, guarantees positivity of the density and pressure. Since the HLLE wave speed in the case of a rarefaction is always the speed of the leading edge of the true rarefaction, and since the HLLE solution is conservative, the density in a rarefaction will always be at least as great as that of the true solution. This can be seen clearly in the example below.
End of explanation
"""
|
LeonardoCastro/Servicio_social
|
Parte 2 - PyCUDA y aplicaciones/06 - Impresiones y tiempos en PyCUDA.ipynb
|
mit
|
%%writefile ./Programas/saludar.py
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
mod = SourceModule("""
#include <stdio.h>
__global__ void saluda()
{
printf("Mi indice x es %d, mi indice en y es %d\\n", threadIdx.x, threadIdx.y);
}
""")
func = mod.get_function("saluda")
func(block=(4,4,1))
"""
Explanation: Printing
If you want the kernel to print something while it runs, you have to use printf, which is the function used in C and CUDA for printing. However, there are a few things to keep in mind. First of all, printf writes to the console, which is why the reader will notice, when running the next cell, that even though we are telling the kernel to print, we get nothing in the notebook. At first glance this might look like a glaring error, but if we commented out the first line (the one starting with %%writefile) and checked the console from which we opened the notebook, we would see that the expected output has been printed there. Let's run the following cell with the %%writefile magic command to write the contents of the cell to a file.
End of explanation
"""
!ipython ./Programas/saludar.py
"""
Explanation: Now let's run the program
End of explanation
"""
%%writefile ./Programas/saludar_bloques.py
import pycuda.driver as cuda
import pycuda.autoinit
from pycuda.compiler import SourceModule
mod = SourceModule("""
#include <stdio.h>
__global__ void say_hi()
{
printf("Soy el thread numero %d en threadIdx.x:%d.threadIdx.y:%d blockIdx.:%d blockIdx.y:%d blockDim.x:%d blockDim.y:%d\\n",(threadIdx.x+threadIdx.y*blockDim.x+(blockIdx.x*blockDim.x*blockDim.y)+(blockIdx.y*blockDim.x*blockDim.y)),threadIdx.x, threadIdx.y,blockIdx.x,blockIdx.y,blockDim.x,blockDim.y);
}
""")
func = mod.get_function("say_hi")
func(block=(4,4,1),grid=(2,2,1))
"""
Explanation: Finally, let's write a kernel that prints more information about each thread, such as its block index, the block dimensions, and its indices within the block.
End of explanation
"""
!ipython ./Programas/saludar_bloques.py
"""
Explanation: Let's run it. We expect the blocks to execute one by one; let's see what happens.
End of explanation
"""
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
import numpy
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
import pycuda.cumath
from pycuda.elementwise import ElementwiseKernel
blocks = 64
block_size = 128
valores = blocks * block_size
print "Usando", valores, "valores"
# Número de iteraciones para los cálculos
n_iter = 100000
print "Calculando %d iteraciones" % (n_iter)
# Crear dos timers
inicio = drv.Event()
fin = drv.Event()
# SourceModele
mod = SourceModule("""
__global__ void gpusin(float *dest, float *a, int n_iter)
{
const int i = blockDim.x*blockIdx.x + threadIdx.x;
for(int n = 0; n < n_iter; n++) {
a[i] = sin(a[i]);
}
dest[i] = a[i];
}
""")
gpusin = mod.get_function("gpusin")
# creamos un arreglo 1s
a = numpy.ones(valores).astype(numpy.float32)
# creamos un arreglo para guardar el resultado
dest = numpy.zeros_like(a)
inicio.record() # comenzamos a tomar el tiempo
gpusin(drv.Out(dest), drv.In(a), numpy.int32(n_iter), grid=(blocks,1), block=(block_size,1,1) )
fin.record() # terminamos de tomar el tiempo
# calculamos cuánto duró la corrida
fin.synchronize()
segs = inicio.time_till(fin)*1e-3
print "Tiempo con SourceModule y primeros 3 resultados:"
print "%fs, %s" % (segs, str(dest[:3]))
# Usando ElementwiseKernel con sin en un ciclo for en C
kernel = ElementwiseKernel(
"float *a, int n_iter",
"for(int n = 0; n < n_iter; n++) { a[i] = sin(a[i]);}",
"gpusin")
a = numpy.ones(valores).astype(numpy.float32)
a_gpu = gpuarray.to_gpu(a)
inicio.record() # comenzamos a tomar el tiempo
kernel(a_gpu, numpy.int32(n_iter))
fin.record() # terminamos de tomar el tiempo
# calculamos cuánto duró la corrida
fin.synchronize()
segs = inicio.time_till(fin)*1e-3
print "Tiempo con Elementwise y primeros 3 resultados:"
print "%fs, %s" % (segs, str(a_gpu.get()[:3]))
# Elementwise haciendo el loop en Python
kernel = ElementwiseKernel(
"float *a",
"a[i] = sin(a[i]);",
"gpusin")
a = numpy.ones(valores).astype(numpy.float32)
a_gpu = gpuarray.to_gpu(a)
inicio.record() # comenzamos a tomar el tiempo
for i in range(n_iter):
kernel(a_gpu)
fin.record() # terminamos de tomar el tiempo
# calculamos cuánto duró la corrida
fin.synchronize()
segs = inicio.time_till(fin)*1e-3
print "Tiempo con Elementwise en Python y primeros 3 resultados:"
print "%fs, %s" % (segs, str(a_gpu.get()[:3]))
# GPUArray
# El resultado se copia a la memoria principal en cada iteración (esto es un cuello de botella)
a = numpy.ones(valores).astype(numpy.float32)
a_gpu = gpuarray.to_gpu(a)
inicio.record() # comenzamos a tomar el tiempo
for i in range(n_iter):
a_gpu = pycuda.cumath.sin(a_gpu)
fin.record() # terminamos de tomar el tiempo
# calculamos cuánto duró la corrida
fin.synchronize()
segs = inicio.time_till(fin)*1e-3
print "Tiempo con GPUArray y primeros 3 resultados:"
print "%fs, %s" % (segs, str(a_gpu.get()[:3]))
# CPU
a = numpy.ones(valores).astype(numpy.float32)
inicio.record() # comenzamos a tomar el tiempo
inicio.synchronize()
for i in range(n_iter):
a = numpy.sin(a)
fin.record() # terminamos de tomar el tiempo
# calculamos cuánto duró la corrida
fin.synchronize()
segs = inicio.time_till(fin)*1e-3
print "Tiempo con CPU y primeros 3 resultados:"
print "%fs, %s" % (segs, str(a[:3]))
"""
Explanation: Indeed, they executed one at a time.
Performance
Below we add some code to compare the performance of several ways of doing the computation. Each one is explained in the code.
End of explanation
"""
|
jsjol/GaussianProcessRegressionForDiffusionMRI
|
notebooks/show_ODFs.ipynb
|
bsd-3-clause
|
dataset = 'SPARC'
if dataset == 'HCP':
subject_path = conf['HCP']['data_paths']['mgh_1007']
loader = get_HCP_loader(subject_path)
small_data_path = '{}/mri/small_data.npy'.format(subject_path)
loader.update_filename_data(small_data_path)
data = loader.data
gtab = loader.gtab
voxel_size = loader.voxel_size
elif dataset == 'SPARC':
subject_path = conf['SPARC']['data_paths']['gradient_60']
gtab, data, voxel_size = preprocess_SPARC(subject_path, normalize=True)
btable = np.loadtxt(get_data('dsi4169btable'))
#btable = np.loadtxt(get_data('dsi515btable'))
gtab_dsi = gradient_table(btable[:, 0], btable[:, 1:],
big_delta=gtab.big_delta, small_delta=gtab.small_delta)
"""
Explanation: Load the data.
End of explanation
"""
map_model_laplacian_aniso = mapmri.MapmriModel(gtab, radial_order=6,
laplacian_regularization=True,
laplacian_weighting='GCV')
mapfit_laplacian_aniso = map_model_laplacian_aniso.fit(data)
"""
Explanation: Fit a MAPL model to the data.
End of explanation
"""
tenmodel = dti.TensorModel(gtab)
tenfit = tenmodel.fit(data)
fitted = {'MAPL': mapfit_laplacian_aniso.predict(gtab)[:, :, 0],
'DTI': tenfit.predict(gtab)[:, :, 0]}
"""
Explanation: We want to use an FA image as background, which requires us to fit a DTI model.
End of explanation
"""
kern = get_default_kernel(n_max=6, spatial_dims=2)
gp_model = GaussianProcessModel(gtab, spatial_dims=2, kernel=kern, verbose=False)
gp_fit = gp_model.fit(np.squeeze(data), mean=None, voxel_size=voxel_size[0:2], retrain=True)
kern = get_default_kernel(n_max=2, spatial_dims=2)
gp_dti_model = GaussianProcessModel(gtab, spatial_dims=2, kernel=kern, verbose=False)
gp_dti_fit = gp_dti_model.fit(np.squeeze(data), mean=fitted['DTI'], voxel_size=voxel_size[0:2], retrain=True)
kern = get_default_kernel(n_max=2, spatial_dims=2)
gp_mapl_model = GaussianProcessModel(gtab, spatial_dims=2, kernel=kern, verbose=False)
gp_mapl_fit = gp_mapl_model.fit(np.squeeze(data), mean=fitted['MAPL'], voxel_size=voxel_size[0:2], retrain=True)
"""
Explanation: Fit GP without mean and with DTI and MAPL as mean.
End of explanation
"""
pred = {'MAPL': mapfit_laplacian_aniso.predict(gtab_dsi)[:, :, 0],
'DTI': tenfit.predict(gtab_dsi)[:, :, 0]}
"""
Explanation: gp_model = GaussianProcessModel(gtab, spatial_dims=2, q_magnitude_transform=np.sqrt, verbose=False)
gp_fit = gp_model.fit(np.squeeze(data), mean=None, voxel_size=voxel_size[0:2], retrain=True)
gp_dti_fit = gp_model.fit(np.squeeze(data), mean=fitted['DTI'], voxel_size=voxel_size[0:2], retrain=True)
gp_mapl_fit = gp_model.fit(np.squeeze(data), mean=fitted['MAPL'], voxel_size=voxel_size[0:2], retrain=True)
End of explanation
"""
sphere = get_sphere('symmetric724').subdivide(1)
"""
Explanation: Compute the ODFs
Load an odf reconstruction sphere
End of explanation
"""
odf = {'MAPL': mapfit_laplacian_aniso.odf(sphere, s=0),
'DTI': tenfit.odf(sphere)}
odf['GP'] = gp_fit.odf(sphere, gtab_dsi=gtab_dsi, mean=None)[:, :, None, :]
odf['DTI_GP'] = gp_dti_fit.odf(sphere, gtab_dsi=gtab_dsi, mean=pred['DTI'])[:, :, None, :]
odf['MAPL_GP'] = gp_mapl_fit.odf(sphere, gtab_dsi=gtab_dsi, mean=pred['MAPL'])[:, :, None, :]
"""
Explanation: The radial order $s$ can be increased to sharpen the results, but it might
also make the odfs noisier. Note that a "proper" ODF corresponds to $s=0$.
End of explanation
"""
for name, _odf in odf.items():
ren = window.Renderer()
ren.background((1, 1, 1))
odf_actor = actor.odf_slicer(_odf, sphere=sphere, scale=0.5, colormap='jet')
background_actor = actor.slicer(tenfit.fa, opacity=1)
odf_actor.display(z=0)
odf_actor.RotateZ(90)
background_actor.display(z=0)
background_actor.RotateZ(90)
background_actor.SetPosition(0, 0, -1)
ren.add(background_actor)
ren.add(odf_actor)
window.record(ren, out_path='odfs_{}.png'.format(name), size=(1000, 1000))
"""
Explanation: Display the ODFs
End of explanation
"""
|
UWPreMAP/PreMAP2017
|
lessons/06-plotting.ipynb
|
mit
|
#The following set of commands are needed if you're on a MacOS and not Linux, which is none of you in class, so don't worry about it!
#import matplotlib
#matplotlib.use('TkAgg')
# we use matplotlib and specifically pyplot
# the convention is to import it like this:
import matplotlib.pyplot as plt
# We'll also read some data using astropy, so let's import that
import numpy as np
from astropy.io import ascii
# I'm also using this "magic" function to make my plots appear in this notebook
# Only do this when working with notebooks
%matplotlib inline
"""
Explanation: Pre-MAP Course Website | Pre-MAP GitHub | Google
Plotting with matplotlib
examples in this notebook are based on Nicholas Hunt-Walker's plotting tutorial and Jake VanderPlas' matplotlib tutorial
In this notebook we will learn how to make basic plots like scatter plots, histograms and line plots using matplotlib in python.
The gallery
If you know what you want a plot to look like, but you don't know the code to make it, go to the matplotlib gallery, where you can see examples and see the source code that generated them.
Basic Plot Commands
Some of the basic plotting commands include
python
plt.plot() # all purpose plotting function
plt.errorbar() # plotting with errorbars
plt.loglog(), plt.semilogx(), plt.semilogy() # plotting in logarithmic space
End of explanation
"""
# Sample data
x = np.arange(10)
y = np.arange(10, 20)
# Make the plot, then show the plot
plt.plot(x, y)
plt.show()
"""
Explanation: Let's make some sample x and y data, and plot it with the plt.plot command:
End of explanation
"""
plt.plot(x, y, '.', markersize=20, color='red')
plt.show()
"""
Explanation: You can customize a bunch of features within the plot:
* markersize sets the symbol size
* color sets the color
* The first argument after y sets the marker shape. Try: x, ., o, +, --, -., :
End of explanation
"""
# Let's make x go from zero to 4*pi
x = np.linspace(0, 4*np.pi, 50)
y = np.sin(x)
# This will be a thick dashed line:
plt.plot(x, y, linestyle='--', linewidth=5)
# Add labels to the axes
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
# Set the plot title
plt.title('Sine Curve')
plt.show()
"""
Explanation: Let's plot a sine function:
End of explanation
"""
# Let's plot y=x^3
x = np.arange(10)
y = x**3
# Let's make up some errorbars in x and y
xerr_values = 0.2 * np.sqrt(x)
yerr_values = 5 * np.sqrt(y)
# Call the errorbar function
plt.errorbar(x, y, xerr=xerr_values, yerr=yerr_values)
plt.show()
"""
Explanation: Let's plot a figure with errorbars:
End of explanation
"""
x = np.linspace(0, 20)
y = np.exp(x)
plt.semilogy(x, y)
plt.show()
"""
Explanation: There are three options for log-plots: plt.loglog(), plt.semilogx(), plt.semilogy()
End of explanation
"""
xred = np.random.rand(100)
yred = np.random.rand(100)
xblue = np.random.rand(20)
yblue = np.random.rand(20)
plt.plot(xred, yred, '^', color='red', markersize=8,
label='Red Points')
plt.plot(xblue, yblue, '+', color='blue', markersize=12,
markeredgewidth=3, label='Blue Points')
plt.xlabel('Xaxis')
plt.ylabel('Yaxis')
plt.legend()
# You can also place the legend in different places using this:
# plt.legend(loc='lower left')
plt.show()
"""
Explanation: To add a legend to your plot, include the label argument in the plot command, then call plt.legend() at the end of the plotting commands, before plt.show().
End of explanation
"""
x = np.linspace(0, 10)
y = np.sin(x)
plt.plot(x, y)
plt.title('sin')
plt.xlabel('Xaxis')
plt.ylabel('Yaxis')
# just give savefig the file name, or path to file name that you want to write
plt.savefig('sineplot.png')
plt.show()
"""
Explanation: To save figures in python you just use plt.savefig():
End of explanation
"""
# Semimajor-axis in units of AU:
a_AU = np.array([0.387, 0.723, 1. , 1.524, 5.203, 9.537, 19.191, 30.069, 39.482])
# Orbital period in units of years
T_yr = np.array([0.24, 0.62, 1., 1.88, 11.86, 29.46, 84.01, 164.8, 247.7])
# Let's set gravity and mass of the sun in [cgs] units:
G = 6.67e-8
Msun = 1.99e+33
plt.loglog(a_AU, T_yr, 'o')
plt.xlabel('Semi-Major Axis [AU]')
plt.ylabel('Period [yrs]')
plt.show()
# now plot a function over the data
# as you work more in python you will learn how to actually fit models to your data
def keplers_third_law(a, M):
return np.sqrt((4*np.pi**2 * a**3) / (G * M))
# Convert semimajor-axis into centimeters
a_cm = a_AU * 1.496e+13
# Convert period into seconds
T_s = T_yr * 3.154e+7
plt.loglog(a_cm, T_s, 'o')
plt.loglog(a_cm, keplers_third_law(a_cm, Msun), '--',
label='Keplers Third Law') # try swapping out Msun with something else and see what it looks like
plt.xlabel('Semi-Major Axis [cm]')
plt.ylabel('Period [s]')
plt.legend(loc=2)
plt.show()
"""
Explanation: Let's make a log-log plot displaying Kepler's law:
End of explanation
"""
# first let's read in some data to use for plotting
galaxy_table = ascii.read('data/mygalaxy.dat')
galaxy_table[:5]
# simple scatter plot
plt.scatter(galaxy_table['col1'], galaxy_table['col2'])
plt.show()
"""
Explanation: Scatter Plots
End of explanation
"""
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
plt.show()
# here would be the equivalent statement using plt.plot(), note that the syntax is a little different
plt.plot(galaxy_table['col1'], galaxy_table['col2'], 'o',
color='blue', markersize=1, markeredgecolor='None')
plt.show()
"""
Explanation: SIDE NOTE: If you are running things in the IPython environment or from a script you would want to do something like the following to get your plots to show up in a new window:
python
plt.scatter(galaxy_table['col1'], galaxy_table['col2'])
plt.show()
In an IPython Notebook, you will see the plot outputs whether or not you call plt.show() because we've used the %matplotlib inline magic function.
Let's break down these basic examples:
- We are running functions called "plot" or "scatter" that take specific arguments.
- The most basic arguments that these functions take are in the form of (x,y) values for the plot, and we get these from a data table.
- We can use more specific arugments like 'o' to customize things like the plot symbol (marker) that we are using.
With plt.scatter() you can change things like point color, point size, point edge color and point type. The argument syntax for adding these options are as follows:
color = 'colorname'; could be 'b' for blue, 'k' for black, 'r' for red
s = number; changes marker size
markeredgecolor = None or 'colorname'
marker = 'symbolname', i.e. 's' for square, 'o' for circle, '+' for cross, 'x' for x, '*' for star, '^' for triangle, etc.
Let's do an example:
End of explanation
"""
plt.scatter(galaxy_table['col1'], galaxy_table['col2'], color='blue',
s=1, edgecolor='None', marker='o')
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.show()
"""
Explanation: The plot is starting to look better, but there is one really important thing that is missing: axis labels. These are very easy to add in matplotlib using plt.xlabel() and plt.ylabel(). These functions take strings as their arguments for the labels, but can also take other arguments that change the text format:
End of explanation
"""
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.show()
"""
Explanation: We can also change things like the axis limits with plt.xlim() and plt.ylim(). For these we just want to feed it a range of values for each axis:
End of explanation
"""
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# Labels
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
# Set limits
plt.xlim([-180,180])
plt.ylim([-90,90])
# Choose axis ticks
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
# turn on minor tick marks
plt.minorticks_on()
plt.grid() # turn on a background grid to guide the eye
plt.show()
"""
Explanation: The axis labels are easy to read, but the numbers and tick marks on the axis are pretty small. We can tweak lots of little things about how the tick marks look, how they are spaced, and if we want to have a grid to guide the reader's eyes. I will give just a couple of examples here:
End of explanation
"""
plt.figure(figsize=(10,4)) # change figure size
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# Labels
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
# Set limits
plt.xlim([-180,180])
plt.ylim([-90,90])
# Choose axis ticks
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
# turn on minor tick marks
plt.minorticks_on()
plt.grid() # turn on a background grid to guide the eye
plt.show()
"""
Explanation: By default the figure is square, but maybe this is not the best way to represent our data. If this is the case we can change the size of the figure:
End of explanation
"""
plt.figure(figsize=(10,4)) # change figure size
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# the next three lines put text on the figure at the specified coordinates
plt.text(-90, -50, 'LMC', fontsize=20)
plt.text(-60, -60, 'SMC', fontsize=20)
plt.text(0, -30, 'MW Bulge', fontsize=20)
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
plt.minorticks_on() # turn on minor tick marks
plt.grid() # turn on a background grid to guide the eye
plt.show()
"""
Explanation: The last thing I'll mention here is how to put text on plots. This too is simple as long as you specify (x,y) coordinates for the text.
End of explanation
"""
# plots histogram where the y-axis is counts
x = np.random.randn(10000)
num, bins, patches = plt.hist(x, bins=50)
plt.xlabel('Bins')
plt.ylabel('Counts')
plt.show()
# plots histogram where the y-axis is a probability distribution
plt.hist(x, bins=50, density=True)
plt.xlabel('Bins')
plt.ylabel('Probability')
plt.show()
# plots a histogram where the y-axis is a fraction of the total
weights = np.ones_like(x)/len(x)
plt.hist(x, bins=50, weights=weights)
plt.ylabel('Fraction')
plt.xlabel('Bins')
plt.show()
# print out num and bins and see what they look like! what size is each array?
# how would you plot this histogram using plt.plot? what is the x value and what is the y value?
"""
Explanation: Exercise 1
Take the plot that demonstrates Kepler's laws in our solar system, and:
label each data point with the correct planet.
change the size of the axis labels to make them clearer.
make any aesthetic changes that you think enhance the meaning you're trying to convey with the plot (maybe you want to convey that each planet is a different size, and so change the marker size; maybe you want to convey that inner planets are rocky and outer planets are gaseous, so change the marker colour for a subset of them; etc)
Histograms
Histograms can be a great way to visualize data, and they are (surprise) easy to make in python! The basic command is
python
num, bins, patches = plt.hist(array, bins=number)
num refers to the number of elements in each bin, and bins refers to the bin locations on the x-axis. Note that bins actually gives you the bin EDGES, so bins will always contain one more element than num. We can ignore patches for now. As arguments plt.hist() takes an array and the number of bins you would like (default is bins=10). Some other optional arguments for plt.hist are:
range: lower and upper range of the bins
density: set to 'True' or 'False.' If True it will return a normalized probability distribution instead of just raw number counts for the y-axis (older matplotlib versions call this argument normed).
histtype: can be set to something like 'step', 'stepfilled', or 'bar' for the histogram style.
weights: an array of values that must be the same size as the data array. It controls the factor by which each data point is weighted, i.e. it makes your number counts into number_counts*weight.
End of explanation
"""
# make two side by side plots
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
plt.figure(figsize=[15,3])
plt.subplot(1,2,1) # 1 row, 2 columns, 1st figure
plt.plot(x1,y1)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.subplot(1,2,2) # 1 row, 2 columns, 2nd figure
plt.plot(x2,y2)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.show()
# stack two plots on top of one another
plt.subplot(2,1,1) # 2 rows, 1 column, 1st figure
plt.plot(x1,y1)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.subplot(2,1,2) # 2 rows, 1 column, 2nd figure
plt.plot(x2,y2)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.show()
"""
Explanation: Subplots
Subplots are a way to put multiple plots in what amounts to the same figure; think of subplots like an array of plots!
<img src="images/subplot-grid.png">
End of explanation
"""
# don't worry about this way to read in files right now
import pandas as pd
exoplanets = pd.read_csv('data/exoplanet.eu_catalog_1022.csv')
# get rid of some rows with missing values to be safe
exoplanets = exoplanets[np.isfinite(exoplanets['orbital_period'])]
# let's see what the data table looks like
exoplanets.head()
# plot distance from host star versus mass (in jupiter masses) for each exoplanet
plt.loglog(exoplanets['semi_major_axis'], exoplanets['mass'],'.')
plt.annotate("Earth", xy=(1,1/317.), size=12)
plt.annotate("Jupiter", xy=(5,1), size=12)
plt.xlabel('Semi-Major Axis [AU]',size=20)
plt.ylabel('Mass [M$_{Jup}$]', size=20)
# let's try to find out if the blobs above separate out by detection type
import seaborn as sns; sns.set()
transits = exoplanets[exoplanets['detection_type'] == 'Primary Transit']
radial_vel = exoplanets[exoplanets['detection_type'] == 'Radial Velocity']
imaging = exoplanets[exoplanets['detection_type'] == 'Imaging']
ttv = exoplanets[exoplanets['detection_type'] == 'TTV']
plt.loglog(transits['semi_major_axis'], transits['mass'], '.', label='Transit',markersize=12)
plt.loglog(radial_vel['semi_major_axis'], radial_vel['mass'], '.', label='Radial Vel', markersize=12)
plt.loglog(imaging['semi_major_axis'], imaging['mass'], '.', label='Direct Imaging', markersize=16)
plt.loglog(ttv['semi_major_axis'], ttv['mass'], '.', label='TTV', color='cyan', markersize=16)
plt.annotate("Earth", xy=(1,1/317.), size=12)
plt.annotate("Jupiter", xy=(5,1), size=12)
plt.xlabel('Semi-Major Axis [AU]', size=20)
plt.ylabel('Mass [M$_{Jup}$]', size=20)
plt.legend(loc=4, prop={'size':16})
# and now just for fun an xkcd style plot!
plt.xkcd()
plt.scatter(exoplanets['discovered'], exoplanets['radius']*11)
plt.xlabel('Year Discovered')
plt.ylabel('Radius [R_Earth]')
# to revert back to regular plotting:
plt.rcdefaults()
"""
Explanation: You can do fancier things with subplots like have different plots share the same axis, put smaller plots as insets to larger plots, etc. Again, take a look at things like the matplotlib library for examples of different plots.
Plotting Exoplanets
Let's try to make some plots with a new dataset. The file that we'll use is taken from exoplanets.eu.
End of explanation
"""
|
luisdelatorre012/luisdelatorre012.github.io
|
Using usaddress.ipynb
|
mit
|
import usaddress
addr='123 Main St. Suite 100 Chicago, IL'
address_tag = usaddress.tag(addr)
address_tag
"""
Explanation: This notebook describes setting up and testing the usaddress package from datamade.
The first step was installation. I couldn't get usaddress to build with pip, so I added conda forge and installed a Windows binary package:
conda config --add channels conda-forge
conda install usaddress
Next, I'll test usaddress on a sample address:
End of explanation
"""
from uszipcode import ZipcodeSearchEngine
search = ZipcodeSearchEngine()
city = address_tag[0]['PlaceName']
state = address_tag[0]['StateName']
search_results = search.by_city_and_state(city, state)
len(search_results)
"""
Explanation: It parsed the address into several components.
What I really want is a latitude and longitude.
I can use the awesome uszipcode package.
For this one, pip install uszipcode worked on Windows.
End of explanation
"""
coords = (search_results[0]['Latitude'], search_results[0]['Longitude'])
coords
"""
Explanation: This returned five search results. Since we're not going for pinpoint accuracy, we'll take the first one.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/uhh/cmip6/models/sandbox-2/atmos.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: UHH
Source ID: SANDBOX-2
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:41
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the boundary layer turbulence scheme use a counter-gradient term?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeors and water vapour from updrafts.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
Solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation have an impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
kimkipyo/dss_git_kkp
|
통계, 머신러닝 복습/160614화_15일차_분류의 기초 Basic Classification/2.분류(classification)의 기초.ipynb
|
mit
|
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
plt.scatter(X.T[0], X.T[1], c=y, s=100, cmap=mpl.cm.brg)
plt.title("data")
plt.show()
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
model = QuadraticDiscriminantAnalysis().fit(X, y)
x = [[0, 0]]
p = model.predict_proba(x)[0]
plt.subplot(211)
plt.scatter(X.T[0], X.T[1], c=y, s=100, cmap=mpl.cm.brg)
plt.scatter(x[0][0], x[0][1], c='r', s=100)
plt.title("data")
plt.subplot(212)
plt.bar(model.classes_, p, align='center')
plt.title("conditional probability")
plt.axis([0, 3, 0, 1])
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.tight_layout()
plt.show()
x = [[-0.2, -0.1]]
p = model.predict_proba(x)[0]
plt.subplot(211)
plt.scatter(X.T[0], X.T[1], c=y, s=100, cmap=mpl.cm.brg)
plt.scatter(x[0][0], x[0][1], c='r', s=100)
plt.title("data")
plt.subplot(212)
plt.bar(model.classes_, p, align="center")
plt.title("conditional probability")
plt.axis([0, 3, 0, 1])
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.tight_layout()
plt.show()
x = [[0.2, 0.2]]
p = model.predict_proba(x)[0]
plt.subplot(211)
plt.scatter(X.T[0], X.T[1], c=y, s=100, cmap=mpl.cm.brg)
plt.scatter(x[0][0], x[0][1], c='r', s=100)
plt.title("data")
plt.subplot(212)
plt.bar(model.classes_, p, align="center")
plt.title("conditional probability")
plt.axis([0, 3, 0, 1])
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.tight_layout()
plt.show()
"""
Explanation: Basics of Classification
Classification is the problem of computing, given the independent variables (features), the dependent-variable category (class) most strongly associated with them. Put as a practical problem: given the data for a sample, we want to find out which category or class that sample belongs to. Since the categories or classes to choose from are given in advance, it is a bit like answering a multiple-choice exam question.
Approaches to the classification problem
Methods for solving a classification problem fall broadly into two groups. One computes, conditionally on the given data, the probability that each category or class is the correct answer (the conditional probability); the other computes a discriminant function that expresses where a point lies relative to the boundary separating the categories.
Conditional probability based
1-1 Conditional-probability based discriminative models
1-2 Conditional-probability based generative models
Discriminant function based
Types of classification models
| Model | Approach |
|-|-|
| Linear/Quadratic Discriminant Analysis | conditional-probability based generative model |
| Naive Bayes | conditional-probability based generative model |
| Logistic Regression | conditional-probability based discriminative model |
| Decision Tree | conditional-probability based discriminative model |
| K Nearest Neighbor | conditional-probability based discriminative model |
| Perceptron | discriminant-function based model |
| Support Vector Machine | discriminant-function based model |
| Neural Network (Deep Learning) | discriminant-function based model |
Decision Tree, K Nearest Neighbor
These are very intuitive approaches. Think of house prices: whether a house borders the Han River changes its price, and refining such rules further (number of floors, duplex or not, and so on) until a price is determined is the Decision Tree way of doing things.
K Nearest Neighbor instead collects data on, say, a million houses; when asked to predict the price of a new house, it gathers the K houses whose independent variables are most similar and predicts from them by majority vote.
Perceptron, Support Vector Machine, Neural Network (Deep Learning)
These build a decision boundary. A function called the discriminant function encodes the criterion for that boundary.
Conditional probability methods
Assume the output $y$ takes one of the class values $C_1, \cdots, C_K$. Conditional probability methods compute, for each of these classes, the probability that $y$ takes that value, and then pick the class with the largest probability. The probabilities must of course be conditional on all the data we have.
$$
\begin{eqnarray}
P_1 &=& P(y=C_1 \mid x_{1:N}, y_{1:N}, x_{N+1} ) \\
\vdots & & \vdots \\
P_K &=& P(y=C_K \mid x_{1:N}, y_{1:N}, x_{N+1} )
\end{eqnarray}
$$
In Scikit-Learn, every classification model that uses conditional probabilities supports the predict_proba and predict_log_proba methods. Given an independent variable $x$, these compute the conditional probability, or its logarithm, for every category value of the dependent variable $y$.
(Why produce such pseudo-probabilities at all? It is related to ensemble methods, covered later.)
QDA (Quadratic Discriminant Analysis)
End of explanation
"""
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
news = fetch_20newsgroups(subset='all')
model = Pipeline([
('vect', TfidfVectorizer(stop_words='english')),
('nb', MultinomialNB()),
])
model.fit(news.data, news.target)
x = news.data[:1]
y = model.predict(x)[0]
print(x[0])
print("=" * 80)
print("Actual Category:", news.target_names[news.target[0]])
print("Predicted Category:", news.target_names[y])
plt.subplot(211)
plt.bar(model.classes_, model.predict_proba(x)[0], align='center')
plt.xlim(-1, 20)
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.subplot(212)
plt.bar(model.classes_, model.predict_log_proba(x)[0], align='center')
plt.xlim(-1, 20)
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.show()
"""
Explanation: Naive Bayes (Naive Bayesian) model
End of explanation
"""
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
X0, y = make_classification(n_features=1, n_redundant=0, n_informative=1, n_clusters_per_class=1, random_state=4)
model = LogisticRegression().fit(X0, y)
xx = np.linspace(-3, 3, 100)
sigm = 1.0/(1+np.exp(-model.coef_[0][0]*xx - model.intercept_[0]))
plt.subplot(211)
plt.plot(xx, sigm)
plt.scatter(X0, y, marker='o', c=y, s=100)
plt.scatter(X0[0], model.predict(X0[:1]), marker='o', s=300, c='r', lw=5, alpha=0.5)
plt.plot(xx, model.predict(xx[:, np.newaxis]) > 0.5, lw=2)
plt.scatter(X0[0], model.predict_proba(X0[:1])[0][1], marker='x', s=300, c='r', lw=5, alpha=0.5)
plt.axvline(X0[0], c='r', lw=2, alpha=0.5)
plt.xlim(-3, 3)
plt.subplot(212)
plt.bar(model.classes_, model.predict_proba(X0[:1])[0], align="center")
plt.xlim(-1, 2)
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.title("conditional probability")
plt.tight_layout()
plt.show()
"""
Explanation: Methods that estimate the conditional probability are themselves divided into discriminative models and generative models.
Conditional probability estimation, method 1 - Discriminative Models
Discriminative models assume that the conditional probability $p(y \mid x)$ can be written as a function with a few parameters (a parametric form) and estimate those parameters. They do not need the likelihood $p(x \mid y)$, i.e. the distribution of the independent variables within each category.
Logistic Regression and Decision Trees belong to the discriminative models.
Logistic regression model
End of explanation
"""
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
iris = load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
tree = DecisionTreeClassifier(criterion='entropy', max_depth=4, random_state=0).fit(X_train, y_train)
test_idx=range(105,150)
resolution=0.01
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = mpl.colors.ListedColormap(colors[:len(np.unique(y_combined))])
x1_min, x1_max = X_combined[:, 0].min() - 1, X_combined[:, 0].max() + 1
x2_min, x2_max = X_combined[:, 1].min() - 1, X_combined[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = tree.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], s=80, label=cl)
"""
Explanation: In the logistic regression plot, the blue curve is the probability of class 1: the height below the curve gives the probability of 1 and the remaining height above it gives the probability of 0.
Decision tree
End of explanation
"""
rv1 = sp.stats.norm(-2,1.5);
rv2 = sp.stats.norm(2,1.5);
N = 30
x1 = rv1.rvs(N)
x2 = rv2.rvs(N)
x = np.c_[x1, x2]  # np.c_ stacks the two sample sets column-wise
y = np.c_[np.ones(N), np.zeros(N)]
idx = np.random.permutation(N)  # np.random.shuffle returns None, so use permutation to get shuffled indices
x = x[idx]
y = y[idx]
xx = np.linspace(-5, 5, 1000)
marginal = 0.5 * rv1.pdf(xx) + 0.5 * rv2.pdf(xx)  # these three lines (the marginal and the two conditionals) are the key step
cond1 = 0.5 * rv1.pdf(xx)/marginal
cond2 = 0.5 * rv2.pdf(xx)/marginal
plt.subplot(311)
plt.plot(xx, rv1.pdf(xx), label="P(X|y=1)")
plt.plot(xx, rv2.pdf(xx), label="P(X|y=2)")
plt.title("Likelihood")
plt.legend()
plt.subplot(312)
plt.scatter(x, y, s=100)
plt.title("Target")
plt.subplot(313)
plt.plot(xx, cond1, label="class 1 = P(X|y=1)")
plt.plot(xx, cond2, label="class 2 = P(X|y=2)")
plt.title("Conditional Density")
plt.legend()
plt.tight_layout()
plt.show()
"""
Explanation: With a decision tree the estimated probability is not smooth but angular; there is no in-between, everything is cut strictly at the decision boundaries.
Conditional probability estimation, method 2 - Generative Models
The second way to estimate the conditional probability is to use Bayes' rule to compute the final conditional probability $p(y \mid x)$ from the joint probability $p(x,y)$ or from the likelihood $p(x \mid y)$. Such methods are called generative models.
$$ p(y \mid x) = \dfrac{p(x,y)}{p(x)} = \dfrac{p(x \mid y)p(y)}{p(x)}$$
The normalizing term $p(x)$ can be computed with the law of total probability.
$$ p(x) = \int p(x,y) \, dy = \int p(x \mid y)p(y) \, dy $$
$$ p(x) = \sum_{k=1}^K p(x,y_k) = \sum_{k=1}^K p(x \mid y_k) p(y_k) $$
End of explanation
"""
from sklearn.linear_model import Perceptron
from sklearn.datasets import load_iris
iris = load_iris()
idx = np.in1d(iris.target, [0, 2])
X = iris.data[idx, 0:2]
y = iris.target[idx]
model = Perceptron(n_iter=100, eta0=0.1, random_state=1).fit(X, y)
XX_min = X[:, 0].min() - 1; XX_max = X[:, 0].max() + 1;
YY_min = X[:, 1].min() - 1; YY_max = X[:, 1].max() + 1;
XX, YY = np.meshgrid(np.linspace(XX_min, XX_max, 1000), np.linspace(YY_min, YY_max, 1000))
ZZ = model.predict(np.c_[XX.ravel(), YY.ravel()]).reshape(XX.shape)
cmap = mpl.colors.ListedColormap(sns.color_palette("Set2"))
plt.contourf(XX, YY, ZZ, cmap=cmap)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
idx = [0, 20, 70, 80]
plt.scatter(X[idx, 0], X[idx, 1], c=y[idx], s=200, alpha=0.5)
for i in idx:
plt.annotate(i, xy=(X[i, 0], X[i, 1]+0.15))
plt.grid(False)
plt.show()
plt.bar(range(len(idx)), model.decision_function(X[idx]), align="center")
plt.xticks(range(len(idx)), idx)
plt.gca().xaxis.grid(False)
plt.title("Discriminant Function")
plt.show()
from mpl_toolkits.mplot3d import Axes3D
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data[:, :2]
y = iris.target
idx = np.logical_or(iris.target == 0, iris.target == 1)
X = iris.data[idx, :3]
y = iris.target[idx]
fig = plt.figure()
ax = Axes3D(fig, elev=20, azim=10)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, marker='o', s=100, cmap=mpl.cm.jet);
ax.plot_surface(np.array([[4, 4], [7, 7]]), np.array([[2, 4.5], [2, 4.5]]),
np.array([[2, 4], [2, 4]]), color='g', alpha=.3);
"""
Explanation: In the likelihood figure, the blue curve is P(X|y=1) and the green curve is P(X|y=2);
class 1 corresponds to a y value of 1 and class 2 to a y value of 0.
Discriminant-function based models
Another way to classify is to define the regions in which each class lives together with the boundary plane that separates those regions.
The boundary is described by a discriminant function, a function of the form $f(x)$ that measures the distance from the boundary plane.
$$ \text{boundary plane}: \;\; f(x) = 0 $$
$$ \text{class 1}: \;\; f(x) > 0 $$
$$ \text{class 0}: \;\; f(x) < 0 $$
In Scikit-Learn, discriminant-function based models provide the decision_function method, which returns the value of the discriminant function.
The equation f(x)=0 is what describes the boundary itself.
In two dimensions the "plane" is a line, in one dimension it is a threshold point, and in three or more dimensions it is a (hyper)plane.
Because only the sign of f(x) is used, this formulation basically handles just two classes at a time; multi-class problems require a modification such as one-vs-rest. The magnitude of f(x) is exactly the distance from the boundary.
Perceptron
End of explanation
"""
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
model = svm.NuSVC().fit(X, Y)
Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=3, linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
idx = [0, 20, 40, 60]
plt.scatter(X[idx, 0], X[idx, 1], c=Y[idx], s=200, alpha=0.5)
for i in idx:
plt.annotate(i, xy=(X[i, 0], X[i, 1]+0.15), color='white')
plt.grid(False)
plt.axis([-3, 3, -3, 3])
plt.show()
plt.bar(range(len(idx)), model.decision_function(X[idx]), align="center")
plt.xticks(range(len(idx)), idx)
plt.gca().xaxis.grid(False)
plt.title("Discriminant Function")
plt.show()
"""
Explanation: Kernel SVM (Kernel Support Vector Machine)
End of explanation
"""
|
bspalding/research_public
|
advanced_sample_analyses/drafts/Different definitions of momentum.ipynb
|
apache-2.0
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
k = 30
start = '2014-01-01'
end = '2015-01-01'
pricing = get_pricing('PEP', fields='price', start_date=start, end_date=end)
fundamentals = init_fundamentals()
num_shares = get_fundamentals(query(fundamentals.earnings_report.basic_average_shares,)
.filter(fundamentals.company_reference.primary_symbol == 'PEP',), end)
x = np.log(pricing)
v = x.diff()
m = get_pricing('PEP', fields='volume', start_date=start, end_date=end)/num_shares.values[0,0]
p0 = pd.rolling_sum(v, k)
p1 = pd.rolling_sum(m*v, k)
p2 = p1/pd.rolling_sum(m, k)
p3 = pd.rolling_mean(v, k)/pd.rolling_std(v, k)
f, (ax1, ax2) = plt.subplots(2,1)
ax1.plot(p0)
ax1.plot(p1)
ax1.plot(p2)
ax1.plot(p3)
ax1.set_title('Momentum of PEP')
ax1.legend(['p(0)', 'p(1)', 'p(2)', 'p(3)'], bbox_to_anchor=(1.1, 1))
ax2.plot(p0)
ax2.plot(p1)
ax2.plot(p2)
ax2.plot(p3)
ax2.axis([0, 300, -0.005, 0.005])
ax2.set_xlabel('Time');
"""
Explanation: Different definitions of momentum
By Evgenia "Jenny" Nitishinskaya
Notebook released under the Creative Commons Attribution 4.0 License.
A momentum strategy is one that buys stocks whose prices seem to be rising and sells those whose prices seem to be falling. That is, it assumes that if a stock price has a lot of momentum in some direction, it will keep moving in that direction. In this notebook I try out the different definitions of momentum described in http://arxiv.org/pdf/1208.2775.pdf. They define 4 different measures, called $p^{(1)}$, $p^{(0)}$, $p^{(2)}$, and $p^{(3)}$.
Their approach is based in physics, where the momentum is defined as $p = mv$, the product of the mass and the velocity. First, they define $x(t)$ to be the log of the price of the security. Conveniently, the return on the security is then the derivative of $x(t)$, which is called the velocity $v(t)$. Then they suggest a number of different definitions of mass $m(t)$; in the examples below, we'll use the inverse of standard deviation and turnover rate as mass. This works with our analogy because the more volatile or the less liquid an asset (the smaller its mass), the easier it is to move its price (i.e. change its position). The different momenta are then defined (for a lookback window $k$) as:
$$p^{(0)}(t) = \sum_{i=0}^{k-1} v(t-i)$$
$$p^{(1)}(t) = \sum_{i=0}^{k-1} m(t-i) v(t-i)$$
$$p^{(2)}(t) = \frac{\sum_{i=0}^{k-1} m(t-i) v(t-i)}{\sum_{i=0}^{k-1} m(t-i)}$$
$$p^{(3)}(t) = \frac{\mu(v(t-k+1),\ldots, v(t))}{\sigma(v(t-k+1),\ldots, v(t))} $$
First, let's just implement the different momentum definitions, and plot the rolling momenta for one stock:
End of explanation
"""
def get_p(prices, m, d, k):
""" Returns the dth-degree rolling momentum of data using lookback window length k """
x = np.log(prices)
v = x.diff()
m = np.array(m)
if d == 0:
return pd.rolling_sum(v, k)
elif d == 1:
return pd.rolling_sum(m*v, k)
elif d == 2:
return pd.rolling_sum(m*v, k)/pd.rolling_sum(m, k)
elif d == 3:
return pd.rolling_mean(v, k)/pd.rolling_std(v, k)
"""
Explanation: In order to use this in a strategy, we should wrap our momentum calculator in a function:
End of explanation
"""
# Load the assets we want to trade
start = '2010-01-01'
end = '2015-01-01'
assets = sorted(['STX', 'WDC', 'CBI', 'JEC', 'VMC', 'PG', 'AAPL', 'PEP', 'AON', 'DAL'])
data = get_pricing(assets, start_date='2010-01-01', end_date='2015-01-01').loc['price', :, :]
# Get turnover rate for the assets
fundamentals = init_fundamentals()
num_shares = get_fundamentals(query(fundamentals.earnings_report.basic_average_shares,)
.filter(fundamentals.company_reference.primary_symbol.in_(assets),), end)
turnover = get_pricing(assets, fields='volume', start_date=start, end_date=end)/num_shares.values[0]
# Plot the prices just for fun
data.plot(figsize=(10,7), colors=['r', 'g', 'b', 'k', 'c', 'm', 'orange',
'chartreuse', 'slateblue', 'silver'])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.ylabel('Price')
plt.xlabel('Time');
# Calculate all the rolling momenta for the data and compute daily ranking of assets by momentum
lookback = 30
ps = np.array([np.array(get_p(data, turnover, j, lookback).T) for j in range(4)])
orders = [[ps[j].T[i].argsort() for i in range(len(ps[0,0]))] for j in range(4)]
ranks = [[orders[j][i].argsort() for i in range(len(orders[1]))] for j in range(4)]
# Cast data to numpy array for easier manipulation
data_array = np.array(data)
# Simulate going long on high-momentum stocks and short low-momentum stocks
# Our first 2*lookback - 2 values will be NaN since we used 2 lookback windows, so start on day 2*lookback
tots = [[0]*4 for j in range(len(data) - 2*lookback)]
for t in range(2*lookback, len(ranks[0]) - 2*lookback):
tots[t] = list(tots[t-1])
# Only update portfolio every 2*lookback days
if t%(2*lookback):
continue
# Go long top quintile of stocks and short bottom quintile
shorts = np.array([[int(x < 2)for x in ranks[j][t]] for j in range(4)])
longs = np.array([[int(x > 7) for x in ranks[j][t]] for j in range(4)])
# How many shares of each stock are in $1000
shares_in_1k = 1000/data_array[t]
# Go long and short $1000 each in the specified stocks, then clear holdings in 2*lookback days
returns = (data_array[t+2*lookback]*shares_in_1k - [1000]*len(assets))*(longs - shorts)
tots[t] += np.sum(returns, 1)
# Adjust so that tots[t] is actually money on day t
tots = [[0,0,0,0]]*2*lookback + tots
# Plot total money earned using the 3 different momentum definitions
plt.plot(tots)
plt.title('Cash in portfolio')
plt.legend(['p(0)', 'p(1)', 'p(2)', 'p(3)'], loc=4)
plt.xlabel('Time')
plt.ylabel('$');
"""
Explanation: Now we implement the strategy described in the paper: pick a holding period, and every period rebalance your portfolio to be long \$1000 each in the highest-momentum stocks and short \$1000 each in the lowest-momentum stocks. In the code below, my holding period is simply twice the lookback period.
End of explanation
"""
|
CalPolyPat/phys202-2015-work
|
assignments/assignment10/ODEsEx02.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
"""
Explanation: Ordinary Differential Equations Exercise 1
Imports
End of explanation
"""
def lorentz_derivs(yvec, t, sigma, rho, beta):
"""Compute the the derivatives for the Lorentz system at yvec(t)."""
dx = sigma*(yvec[1]-yvec[0])
dy = yvec[0]*(rho-yvec[2])-yvec[1]
dz = yvec[0]*yvec[1]-beta*yvec[2]
return [dx,dy,dz]
assert np.allclose(lorentz_derivs((1,1,1),0, 1.0, 1.0, 2.0),[0.0,-1.0,-1.0])
"""
Explanation: Lorenz system
The Lorenz system is one of the earliest studied examples of a system of differential equations that exhibits chaotic behavior, such as bifurcations, attractors, and sensitive dependence on initial conditions. The differential equations read:
$$ \frac{dx}{dt} = \sigma(y-x) $$
$$ \frac{dy}{dt} = x(\rho-z) - y $$
$$ \frac{dz}{dt} = xy - \beta z $$
The solution vector is $[x(t),y(t),z(t)]$ and $\sigma$, $\rho$, and $\beta$ are parameters that govern the behavior of the solutions.
Write a function lorenz_derivs that works with scipy.integrate.odeint and computes the derivatives for this system.
End of explanation
"""
def solve_lorentz(ic, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
"""Solve the Lorenz system for a single initial condition.
Parameters
----------
ic : array, list, tuple
Initial conditions [x,y,z].
max_time: float
The max time to use. Integrate with 250 points per time unit.
sigma, rho, beta: float
Parameters of the differential equation.
Returns
-------
soln : np.ndarray
The array of the solution. Each row will be the solution vector at that time.
t : np.ndarray
The array of time points used.
"""
t = np.linspace(0,max_time, max_time*250)
return odeint(lorentz_derivs, ic, t, args = (sigma, rho, beta)), t
assert True # leave this to grade solve_lorenz
"""
Explanation: Write a function solve_lorenz that solves the Lorenz system above for a particular initial condition $[x(0),y(0),z(0)]$. Your function should return a tuple of the solution array and time array.
End of explanation
"""
N = 5
colors = plt.cm.hot(np.linspace(0,1,N))
for i in range(N):
# To use these colors with plt.plot, pass them as the color argument
print(colors[i])
def plot_lorentz(N=10, max_time=4.0, sigma=10.0, rho=28.0, beta=8.0/3.0):
"""Plot [x(t),z(t)] for the Lorenz system.
Parameters
----------
N : int
Number of initial conditions and trajectories to plot.
max_time: float
Maximum time to use.
sigma, rho, beta: float
Parameters of the differential equation.
"""
f = plt.figure(figsize=(15, N*8))
np.random.seed(1)
colors = plt.cm.hot(np.linspace(0,1,N))
for n in range(N):
        plt.subplot(N, 1, n + 1)  # subplot indexing starts at 1, not 0
x0 = np.random.uniform(-15, 15)
y0 = np.random.uniform(-15, 15)
z0 = np.random.uniform(-15, 15)
soln, t = solve_lorentz([x0,y0,z0], max_time, sigma, rho, beta)
plt.plot(soln[:,0], soln[:, 2], color=colors[n])
plot_lorentz()
assert True # leave this to grade the plot_lorenz function
"""
Explanation: Write a function plot_lorentz that:
Solves the Lorenz system for N different initial conditions. To generate your initial conditions, draw uniform random samples for x, y and z in the range $[-15,15]$. Call np.random.seed(1) a single time at the top of your function to use the same seed each time.
Plot $[x(t),z(t)]$ using a line to show each trajectory.
Color each line using the hot colormap from Matplotlib.
Label your plot and choose an appropriate x and y limit.
The following cell shows how to generate colors that can be used for the lines:
End of explanation
"""
interact(plot_lorentz, N=(1,50,1), max_time=(1,10,1), sigma=(0.0, 50.0, 0.1), rho=(0.0, 50.0, 0.1), beta=fixed(8./3.))
"""
Explanation: Use interact to explore your plot_lorenz function with:
max_time an integer slider over the interval $[1,10]$.
N an integer slider over the interval $[1,50]$.
sigma a float slider over the interval $[0.0,50.0]$.
rho a float slider over the interval $[0.0,50.0]$.
beta fixed at a value of $8/3$.
End of explanation
"""
|
kimkipyo/dss_git_kkp
|
통계, 머신러닝 복습/160524화_7일차_기초 확률론 3 - 확률 모형 Probability Models(단변수 분포)/4.Student-t 분포.ipynb
|
mit
|
import pandas.io.data as web
symbols = ['^GDAXI', '^GSPC', 'YHOO', 'MSFT']
data = pd.DataFrame()
for sym in symbols:
data[sym] = web.DataReader(sym, data_source='yahoo', start='1/1/2006', end='12/31/2016')['Adj Close']
data = data.dropna()
(data / data.ix[0] * 100).plot()
plt.show()
log_returns = np.log(data / data.shift(1))
log_returns.hist(bins=50)
plt.show()
for i, sym in enumerate(symbols):
ax = plt.subplot(2, 2, i+1)
sp.stats.probplot(log_returns[sym].dropna(), plot=ax)
plt.tight_layout()
plt.show()
"""
Explanation: The Student-t distribution
The fat-tail phenomenon
In practical data analysis, many naturally occurring quantities are modelled as random variables following a normal distribution. Looking at such data more closely, however, the weight in both tails is often larger than a normal distribution would imply. This is called the fat-tail phenomenon.
For example, stock returns are usually modelled as normally distributed. Let us check from actual data how closely they follow a normal distribution.
End of explanation
"""
xx = np.linspace(-4, 4, 100)
for df in [1, 2, 5, 10, 20]:
rv = sp.stats.t(df=df)
plt.plot(xx, rv.pdf(xx), label=("student-t (dof = %d)" % df))
plt.plot(xx, sp.stats.norm().pdf(xx), label="Normal", lw=5, alpha=0.5)
plt.legend()
"""
Explanation: A model well suited to data with such fat tails is the Student-t distribution defined below.
The Student-t distribution
By the central limit theorem the normal distribution only appears in the limit of many samples; with just a handful of samples (say four or five) the Student-t distribution should be used instead, precisely because of the fat-tail behaviour seen above.
Here dof denotes the degrees of freedom.
When $n$ samples $x_1, \cdots, x_n$ are drawn from a Gaussian normal random variable $X$, their mean, standardized with the sample standard deviation, follows a Student-t distribution.
The probability density function of the Student-t distribution is defined by the formula below. Unlike the Gaussian normal distribution, it has an additional integer-valued parameter $\nu$ called the degrees of freedom.
$$ f(x) = \frac{\Gamma(\frac{\nu+1}{2})} {\sqrt{\nu\pi}\,\Gamma(\frac{\nu}{2})} \left(1+\frac{(x-\mu)^2}{\nu\sigma^2} \right)^{-\frac{\nu+1}{2}} $$
Here $\Gamma$ is the Gamma function, a special function.
The density is plotted as follows, using the t class from the SciPy stats subpackage; the parameter df sets the degrees of freedom, loc the expected value, and scale the standard deviation.
The figure shows that for a small degrees of freedom $\nu$ the distribution has a larger variance and fatter tails than the Gaussian normal distribution, but that it converges to the Gaussian normal distribution as the degrees of freedom increase.
End of explanation
"""
|
QuantScientist/Deep-Learning-Boot-Camp
|
day02-PyTORCH-and-PyCUDA/PyCUDA/01 PyCUDA verify CUDA 8.0.ipynb
|
mit
|
# Ignore numpy warnings
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
%matplotlib inline
# Some defaults:
plt.rcParams['figure.figsize'] = (12, 6) # Default plot size
"""
Explanation: Deep Learning Bootcamp November 2017, GPU Computing for Data Scientists
<img src="../images/bcamp.png" align="center">
Using CUDA, Jupyter, PyCUDA and PyTorch
01 PyCUDA verify CUDA 8.0
Web: https://www.meetup.com/Tel-Aviv-Deep-Learning-Bootcamp/events/241762893/
Notebooks: <a href="https://github.com/QuantScientist/Data-Science-PyCUDA-GPU"> On GitHub</a>
Shlomo Kashani
<img src="../images/gtx.png" width="35%" align="center">
End of explanation
"""
%reset -f
import pycuda
from pycuda import compiler
import pycuda.driver as drv
import pycuda.driver as cuda
"""
Explanation: PyCUDA Imports
The compute unified device architecture (CUDA) is a heterogeneous sequential-parallel programming model and software environment that gives access to NVIDIA GPU resources via so-called kernels.
Several programming languages, including C/C++, Fortran, and Python, are supported for writing kernels.
Compared to the other, non-scripting languages, Python emphasizes quick development and offers a comprehensive mathematics library that has been widely adopted by scientific communities.
PyCUDA uses Python as a wrapper around CUDA C kernels; it features Python's automatic memory management and error checking and requires no user-visible compilation, which makes it very suitable for interactive testing and quick prototyping in our applications.
End of explanation
"""
drv.init()
print("%d device(s) found." % drv.Device.count())
for ordinal in range(drv.Device.count()):
dev = drv.Device(ordinal)
print "Device #%d: %s" % (ordinal, dev.name())
print " Compute Capability: %d.%d" % dev.compute_capability()
print " Total Memory: %s KB" % (dev.total_memory()//(1024))
atts = [(str(att), value)
for att, value in dev.get_attributes().iteritems()]
atts.sort()
for att, value in atts:
print " %s: %s" % (att, value)
import pycuda.autoinit
import pycuda.driver as cuda
(free,total)=cuda.mem_get_info()
print("Global memory occupancy:%f%% free"%(free*100/total))
for devicenum in range(cuda.Device.count()):
device=cuda.Device(devicenum)
attrs=device.get_attributes()
#Beyond this point is just pretty printing
print("\n===Attributes for device %d"%devicenum)
    for (key, value) in attrs.items():
print("%s:%s"%(str(key),str(value)))
! jupyter-nbconvert "01 PyCUDA verify CUDA 8.0.ipynb" --to slides --reveal-prefix=reveal.js --post serve --ServerPostProcessor.ip="0.0.0.0"
"""
Explanation: Available CUDA Devices
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples
|
notebooks/official/workbench/subscriber_churn_prediction/telecom-subscriber-churn-prediction.ipynb
|
apache-2.0
|
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform \
google-cloud-storage \
category_encoders \
seaborn \
sklearn \
pandas \
fsspec \
gcsfs -q
"""
Explanation: Telecom subscriber churn prediction on Vertex AI
<table align="left">
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/workbench/subscriber_churn_prediction/telecom-subscriber-churn-prediction.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/workbench/subscriber_churn_prediction/telecom-subscriber-churn-prediction.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/workbench/subscriber_churn_prediction/telecom-subscriber-churn-prediction.ipynb">
<img src="https://lh3.googleusercontent.com/UiNooY4LUgW_oTvpsNhPpQzsstV5W8F7rYgxgGBD85cWJoLmrOzhVs_ksK_vgx40SHs7jCqkTkCk=e14-rj-sc0xffffff-h130-w32" alt="Vertex AI logo">
Open in Vertex AI Workbench
</a>
</td>
</table>
<br/><br/><br/>
Table of contents
Overview
Dataset
Objective
Costs
Perform EDA
Train a logistic regression model using scikit-learn
Evaluate the trained model
Save the model to a Cloud Storage path
Create a model with Explainable AI support in Vertex AI
Get explanations from the model
Clean up
Overview
<a name="section-1"></a>
This example demonstrates building a subscriber churn prediction model on a telecom customer churn dataset. The generated churn model is further deployed to Vertex AI Endpoints and explanations are generated using the Explainable AI feature of Vertex AI.
Dataset
<a name="section-2"></a>
The dataset used in this tutorial is Telecom-Customer Churn dataset publicly available on Kaggle. See Customer Churn Prediction 2020. This dataset is used to build and deploy a churn prediction model using Vertex AI in this notebook.
Objective
<a name="section-3"></a>
This tutorial shows you how to do exploratory data analysis, preprocess data, train, deploy and get predictions from a churn prediction model on a tabular churn dataset. The objectives of this tutorial are as follows:
Load data from a Cloud Storage path
Perform exploratory data analysis (EDA)
Preprocess the data
Train a scikit-learn model
Evaluate the scikit-learn model
Save the model to a Cloud Storage path
Create a model and an endpoint in Vertex AI
Deploy the trained model to an endpoint
Generate predictions and explanations on test data from the hosted model
Undeploy the model resource
Costs
<a name="section-4"></a>
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage.
Installation
Install the following packages to run this notebook.
End of explanation
"""
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
"""
Explanation: Before you begin
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the Vertex AI API.
If you are running this notebook locally, you will need to install the Cloud SDK.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands.
Set your project ID
If you don't know your project ID, you may be able to get your project ID using gcloud.
End of explanation
"""
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
! gcloud config set project $PROJECT_ID
"""
Explanation: Otherwise, set your project ID here.
End of explanation
"""
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions.
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex AI" into the filter box, and select Vertex AI Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
BUCKET_URI = "gs://" + BUCKET_NAME
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you create a model in Vertex AI using the Cloud SDK, you give a Cloud Storage path where the trained model is saved.
In this tutorial, Vertex AI saves the trained model to a Cloud Storage bucket. Using this model artifact, you can then
create Vertex AI model and endpoint resources in order to serve
online predictions.
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_URI
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_URI
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
# configure to don't display the warnings
import warnings
import category_encoders as ce
import joblib
import seaborn as sns
from google.cloud import aiplatform, storage
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, plot_roc_curve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
warnings.filterwarnings("ignore")
"""
Explanation: Tutorial
Import required libraries
End of explanation
"""
df = pd.read_csv(
"gs://cloud-samples-data/vertex-ai/managed_notebooks/telecom_churn_prediction/train.csv"
)
print(df.shape)
df.head()
"""
Explanation: Load data from Cloud Storage using Pandas
The Telecom-Customer Churn dataset from Kaggle is made available on a public Cloud Storage bucket at:
gs://cloud-samples-data/vertex-ai/managed_notebooks/telecom_churn_prediction/train.csv
Use Pandas to read data directly from the URI.
End of explanation
"""
df.info()
"""
Explanation: Perform EDA
<a name="section-5"></a>
Check the data types and null counts of the fields.
End of explanation
"""
df["churn"].value_counts(normalize=True)
"""
Explanation: The current dataset doesn't have any null or empty fields in it.
Check the class imbalance.
End of explanation
"""
categ_cols = ["state", "area_code", "international_plan", "voice_mail_plan"]
target = "churn"
num_cols = [i for i in df.columns if i not in categ_cols and i != target]
print(len(categ_cols), len(num_cols))
"""
Explanation: There are 14% churners in the data which is not bad for training a churn prediction model. If the class imbalance seems high, oversampling or undersampling techniques can be considered to balance the class distribution.
Separate the categorical and numerical columns.
End of explanation
"""
for i in categ_cols:
df[i].value_counts().plot(kind="bar")
plt.title(i)
plt.show()
print(num_cols)
df["number_vmail_messages"].describe()
"""
Explanation: Plot the level distribution for the categorical columns.
End of explanation
"""
for i in num_cols:
    # check each numerical field's distribution with a box plot and a histogram
_, ax = plt.subplots(1, 2, figsize=(10, 4))
df[i].plot(kind="box", ax=ax[0])
df[i].plot(kind="hist", ax=ax[1])
plt.title(i)
plt.show()
# check pairplots for selected features
selected_features = [
"total_day_calls",
"total_eve_calls",
"number_customer_service_calls",
"number_vmail_messages",
"account_length",
"total_day_charge",
"total_eve_charge",
]
sns.pairplot(df[selected_features])
plt.show()
"""
Explanation: Check the distributions for the numerical columns.
End of explanation
"""
plt.figure(figsize=(12, 10))
sns.heatmap(df[num_cols].corr(), annot=True)
plt.show()
"""
Explanation: Plot a heat map of the correlation matrix for the numerical features.
End of explanation
"""
drop_cols = [
"total_day_charge",
"total_eve_charge",
"total_night_charge",
"total_intl_charge",
]
df.drop(columns=drop_cols, inplace=True)
num_cols = list(set(num_cols).difference(set(drop_cols)))
df.shape
"""
Explanation: Observations from EDA
There are many levels/categories in the categorical field <b>state</b>. In further steps, creating one-hot encoding vectors for this field would increase the columns drastically and so a binary encoding technique will be considered for encoding this field.
There are only 9% of customers in the data who have had international plans.
There are only a few customers who make frequent calls to customer service.
Only 25% of the customers had at least 16 voicemail messages and thus there was skewness in the distribution of the number_vmail_messages field.
Most of the feature combinations in the pair plot show a circular pattern that suggests that there is almost no correlation between the corresponding two features.
There seems to be a high correlation between minutes and charge. Either one of them can be dropped to avoid multi-collinearity or redundant features in the data.
Preprocess the data
Drop the fields corresponding to the highly-correlated features.
End of explanation
"""
encoder = ce.BinaryEncoder(cols=["state"], return_df=True)
data_encoded = encoder.fit_transform(df)
data_encoded.head()
"""
Explanation: Binary encode the state feature (as there are many levels/categories).
End of explanation
"""
def encode_cols(data, col):
    # Create dummy variables for the given column and drop the first level to avoid the dummy-variable trap.
categ = pd.get_dummies(data[col], prefix=col, drop_first=True)
# Adding the results to the master dataframe
data = pd.concat([data, categ], axis=1)
return data
for i in categ_cols + [target]:
if i != "state":
data_encoded = encode_cols(data_encoded, i)
data_encoded.drop(columns=[i], inplace=True)
data_encoded.shape
"""
Explanation: One-hot encode (drop the first level-column to avoid dummy-variable trap scenarios) the remaining categorical variables.
End of explanation
"""
data_encoded.head()
"""
Explanation: Check the data.
End of explanation
"""
data_encoded.columns
"""
Explanation: Check the columns.
End of explanation
"""
X = data_encoded[[i for i in data_encoded.columns if i not in ["churn_yes"]]].copy()
y = data_encoded["churn_yes"].copy()
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=100
)
print(X_train.shape, X_test.shape)
"""
Explanation: Split the data into train and test sets.
End of explanation
"""
sc = MinMaxScaler()
X_train.loc[:, num_cols] = sc.fit_transform(X_train[num_cols])
X_test.loc[:, num_cols] = sc.transform(X_test[num_cols])
"""
Explanation: Scale the numerical data using MinMaxScaler.
End of explanation
"""
model = LogisticRegression(class_weight="balanced")
model = model.fit(X_train, y_train)
"""
Explanation: Train a logistic regression model using scikit-learn
<a name="section-6"></a>
The argument class_weight adjusts the class weights to the target feature.
End of explanation
"""
plot_roc_curve(model, X_train, y_train, drop_intermediate=False)
plt.show()
# plot the ROC for the model on test data
plot_roc_curve(model, X_test, y_test, drop_intermediate=False)
plt.show()
"""
Explanation: Evaluate the trained model
<a name="section-7"></a>
Plot the ROC and show AUC on train and test sets
Plot the ROC for the model on train data.
End of explanation
"""
y_train_pred = model.predict_proba(X_train)[:, 1]
numbers = [float(x) / 10 for x in range(10)]
y_train_pred_df = pd.DataFrame({"true": y_train, "pred": y_train_pred})
for i in numbers:
y_train_pred_df[i] = y_train_pred_df.pred.map(lambda x: 1 if x > i else 0)
"""
Explanation: Determine the optimal threshold for the binary classification
In general, the logistic regression model outputs probability scores between 0 and 1 and a threshold needs to be determined to assign a class label. Depending on the sensitivity (true-positive rate) and specificity (true-negative rate) of the model, an optimal threshold can be determined.
Create columns with 10 different probability cutoffs.
End of explanation
"""
cutoff_df = pd.DataFrame(columns=["prob", "accuracy", "sensitivity", "specificity"])
# compute the parameters for each threshold considered
for i in numbers:
cm1 = confusion_matrix(y_train_pred_df.true, y_train_pred_df[i])
total1 = sum(sum(cm1))
accuracy = (cm1[0, 0] + cm1[1, 1]) / total1
speci = cm1[0, 0] / (cm1[0, 0] + cm1[0, 1])
sensi = cm1[1, 1] / (cm1[1, 0] + cm1[1, 1])
cutoff_df.loc[i] = [i, accuracy, sensi, speci]
# Let's plot accuracy sensitivity and specificity for various probabilities.
cutoff_df.plot.line(x="prob", y=["accuracy", "sensitivity", "specificity"])
plt.title("Comparison of performance across various thresholds")
plt.show()
"""
Explanation: Now calculate accuracy, sensitivity, and specificity for various probability cutoffs.
End of explanation
"""
threshold = 0.5
# Evaluate train and test sets
y_test_pred = model.predict_proba(X_test)[:, 1]
# to get the performance stats, let's define a handy function
def print_stats(y_true, y_pred):
# Confusion matrix
confusion = confusion_matrix(y_true=y_true, y_pred=y_pred)
print("Confusion Matrix: ")
print(confusion)
TP = confusion[1, 1] # true positive
TN = confusion[0, 0] # true negatives
FP = confusion[0, 1] # false positives
FN = confusion[1, 0] # false negatives
# Let's see the sensitivity or recall of our logistic regression model
sensitivity = TP / float(TP + FN)
print("sensitivity = ", sensitivity)
# Let us calculate specificity
specificity = TN / float(TN + FP)
print("specificity = ", specificity)
    # Calculate false positive rate - predicting churn when the customer didn't churn
fpr = FP / float(TN + FP)
print("False positive rate = ", fpr)
# positive predictive value
precision = TP / float(TP + FP)
print("precision = ", precision)
# accuracy
accuracy = (TP + TN) / (TP + TN + FP + FN)
print("accuracy = ", accuracy)
return
y_train_pred_sm = [1 if i > threshold else 0 for i in y_train_pred]
y_test_pred_sm = [1 if i > threshold else 0 for i in y_test_pred]
# Print the metrics for the model
# on train data
print("Train Data : ")
print_stats(y_train, y_train_pred_sm)
print("\n", "*" * 30, "\n")
# on test data
print("Test Data : ")
print_stats(y_test, y_test_pred_sm)
"""
Explanation: In general, a model with balanced sensitivity and specificity is preferred. In the current case, the threshold where the sensitivity and specificity curves intersect can be considered an optimal threshold.
End of explanation
"""
FILE_NAME = "model.joblib"
joblib.dump(model, FILE_NAME)
# Upload the saved model file to Cloud Storage
BLOB_PATH = (
"[your-blob-path]" # leave blank if no folders inside the bucket are needed.
)
if BLOB_PATH == ("[your-blob-path]"):
BLOB_PATH = ""
BLOB_NAME = BLOB_PATH + FILE_NAME
bucket = storage.Client().bucket(BUCKET_NAME)
blob = bucket.blob(BLOB_NAME)
blob.upload_from_filename(FILE_NAME)
"""
Explanation: While the model's sensitivity and specificity are looking decent, the precision can be considered low. This type of situation may be acceptable to some extent because from a business standpoint in the telecom industry, it still makes sense to identify churners even though it means there'd be some mis-classifications of non-churners as churners.
Save the model to a Cloud Storage path
<a name="section-8"></a>
Save the trained model to a local file model.joblib.
End of explanation
"""
# Set the model display name
MODEL_DISPLAY_NAME = "[your-model-display-name]" # @param {type:"string"}
if MODEL_DISPLAY_NAME == "[your-model-display-name]":
MODEL_DISPLAY_NAME = "subscriber_churn_model"
ARTIFACT_GCS_PATH = f"gs://{BUCKET_NAME}/{BLOB_PATH}"
# Feature-name(Inp_feature) and Output-name(Model_output) can be arbitrary
exp_metadata = {"inputs": {"Inp_feature": {}}, "outputs": {"Model_output": {}}}
from google.cloud.aiplatform_v1.types import SampledShapleyAttribution
# Create a Vertex AI model resource with support for explanations
from google.cloud.aiplatform_v1.types.explanation import ExplanationParameters
aiplatform.init(project=PROJECT_ID, location=REGION)
model = aiplatform.Model.upload(
display_name=MODEL_DISPLAY_NAME,
artifact_uri=ARTIFACT_GCS_PATH,
serving_container_image_uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest",
explanation_metadata=exp_metadata,
explanation_parameters=ExplanationParameters(
sampled_shapley_attribution=SampledShapleyAttribution(path_count=25)
),
)
model.wait()
print(model.display_name)
print(model.resource_name)
"""
Explanation: Create a model with Explainable AI support in Vertex AI
<a name="section-9"></a>
Before creating a model, configure the explanations for the model. For further details, see Configuring explanations in Vertex AI.
Set a display name for the model resource.
End of explanation
"""
ENDPOINT_DISPLAY_NAME = "[your-endpoint-display-name]" # @param {type:"string"}
if ENDPOINT_DISPLAY_NAME == "[your-endpoint-display-name]":
ENDPOINT_DISPLAY_NAME = "subsc_churn_endpoint"
endpoint = aiplatform.Endpoint.create(
display_name=ENDPOINT_DISPLAY_NAME, project=PROJECT_ID, location=REGION
)
print(endpoint.display_name)
print(endpoint.resource_name)
"""
Explanation: Alternatively, the following gcloud command can be used to create the model resource. The explanation-metadata.json file consists of the metadata that is used to configure explanations for the model resource.
gcloud beta ai models upload \
--region=$REGION \
--display-name=$MODEL_DISPLAY_NAME \
--container-image-uri="us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest" \
--artifact-uri=$ARTIFACT_GCS_PATH \
--explanation-method=sampled-shapley \
--explanation-path-count=25 \
--explanation-metadata-file=explanation-metadata.json
Create an endpoint
End of explanation
"""
DEPLOYED_MODEL_NAME = "[deployment-model-name]" # @param {type:"string"}
MACHINE_TYPE = "n1-standard-4"
if DEPLOYED_MODEL_NAME == "[deployment-model-name]":
DEPLOYED_MODEL_NAME = "subsc_churn_deployment"
# deploy the model to the endpoint
model.deploy(
endpoint=endpoint,
deployed_model_display_name=DEPLOYED_MODEL_NAME,
machine_type=MACHINE_TYPE,
)
model.wait()
print(model.display_name)
print(model.resource_name)
"""
Explanation: Deploy the model to the created endpoint
Configure the deployment name, machine type, and other parameters for the deployment.
End of explanation
"""
endpoint.list_models()
"""
Explanation: To ensure the model is deployed, the ID of the deployed model can be checked using the endpoint.list_models() method.
End of explanation
"""
# format a test instance as the request's payload
test_json = [X_test.iloc[0].tolist()]
"""
Explanation: Get explanations from the deployed model
<a name="section-10"></a>
Get explanations for a test instance from the hosted model.
End of explanation
"""
features = X_train.columns.to_list()
def plot_attributions(attrs):
"""
Function to plot the features and their attributions for an instance
"""
rows = {"feature_name": [], "attribution": []}
for i, val in enumerate(features):
rows["feature_name"].append(val)
rows["attribution"].append(attrs["Inp_feature"][i])
attr_df = pd.DataFrame(rows).set_index("feature_name")
attr_df.plot(kind="bar")
plt.show()
return
def explain_tabular_sample(project: str, location: str, endpoint, instances: list):
"""
Function to make an explanation request for the specified payload and generate feature attribution plots
"""
aiplatform.init(project=project, location=location)
# endpoint = aiplatform.Endpoint(endpoint_id)
response = endpoint.explain(instances=instances)
print("#" * 10 + "Explanations" + "#" * 10)
for explanation in response.explanations:
print(" explanation")
# Feature attributions.
attributions = explanation.attributions
for attribution in attributions:
print(" attribution")
print(" baseline_output_value:", attribution.baseline_output_value)
print(" instance_output_value:", attribution.instance_output_value)
print(" output_display_name:", attribution.output_display_name)
print(" approximation_error:", attribution.approximation_error)
print(" output_name:", attribution.output_name)
output_index = attribution.output_index
for output_index in output_index:
print(" output_index:", output_index)
plot_attributions(attribution.feature_attributions)
print("#" * 10 + "Predictions" + "#" * 10)
for prediction in response.predictions:
print(prediction)
return response
# Get explanations for the test instance
prediction = explain_tabular_sample(PROJECT_ID, REGION, endpoint, test_json)
"""
Explanation: Get explanations and plot the feature attributions
End of explanation
"""
# Undeploy model
endpoint.undeploy_all()
# Delete the endpoint
endpoint.delete()
# Delete the model
model.delete()
# Delete the Cloud Storage bucket
delete_bucket = True
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil -m rm -r $BUCKET_URI
"""
Explanation: Clean up
<a name="section-11"></a>
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
* Vertex AI Model
* Vertex AI Endpoint
* Cloud Storage bucket
Set delete_bucket to True to delete the Cloud Storage bucket.
End of explanation
"""
|
brunoalano/hdbscan
|
notebooks/Benchmarking scalability of clustering implementations-v0.7.ipynb
|
bsd-3-clause
|
import hdbscan
import debacl
import fastcluster
import sklearn.cluster
import scipy.cluster
import sklearn.datasets
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set_context('poster')
sns.set_palette('Paired', 10)
sns.set_color_codes()
"""
Explanation: Benchmarking Performance and Scaling of Python Clustering Algorithms
There are a host of different clustering algorithms and implementations thereof for Python. The performance and scaling can depend as much on the implementation as on the underlying algorithm. Obviously a well written implementation in C or C++ will beat a naive implementation in pure Python, but there is more to it than just that. The internals and data structures used can have a large impact on performance, and can even significantly change asymptotic performance. All of this means that, given some amount of data that you want to cluster, your options as to algorithm and implementation may be significantly constrained. I'm both lazy, and prefer empirical results for this sort of thing, so rather than analyzing the implementations and deriving asymptotic performance numbers for various implementations I'm just going to run everything and see what happens.
To begin with we need to get together all the clustering implementations, along with some plotting libraries so we can see what is going on once we've got data. Obviously this is not an exhaustive collection of clustering implementations, so if I've left off your favourite I apologise, but one has to draw a line somewhere.
The implementations being tested are:
Sklearn (which implements several algorithms):
K-Means clustering
DBSCAN clustering
Agglomerative clustering
Spectral clustering
Affinity Propagation
Scipy (which provides basic algorithms):
K-Means clustering
Agglomerative clustering
Fastcluster (which provides very fast agglomerative clustering in C++)
DeBaCl (Density Based Clustering; similar to a mix of DBSCAN and Agglomerative)
HDBSCAN (A robust hierarchical version of DBSCAN)
Obviously a major factor in performance will be the algorithm itself. Some algorithms are simply slower -- often, but not always, because they are doing more work to provide a better clustering.
End of explanation
"""
def benchmark_algorithm(dataset_sizes, cluster_function, function_args, function_kwds,
dataset_dimension=10, dataset_n_clusters=10, max_time=45, sample_size=2):
# Initialize the result with NaNs so that any unfilled entries
# will be considered NULL when we convert to a pandas dataframe at the end
result = np.nan * np.ones((len(dataset_sizes), sample_size))
for index, size in enumerate(dataset_sizes):
for s in range(sample_size):
# Use sklearns make_blobs to generate a random dataset with specified size
# dimension and number of clusters
data, labels = sklearn.datasets.make_blobs(n_samples=size,
n_features=dataset_dimension,
centers=dataset_n_clusters)
# Start the clustering with a timer
start_time = time.time()
cluster_function(data, *function_args, **function_kwds)
time_taken = time.time() - start_time
# If we are taking more than max_time then abort -- we don't
# want to spend excessive time on slow algorithms
if time_taken > max_time:
result[index, s] = time_taken
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size),
result.flatten()]).T, columns=['x','y'])
else:
result[index, s] = time_taken
# Return the result as a dataframe for easier handling with seaborn afterwards
return pd.DataFrame(np.vstack([dataset_sizes.repeat(sample_size),
result.flatten()]).T, columns=['x','y'])
"""
Explanation: Now we need some benchmarking code at various dataset sizes. Because some clustering algorithms have performance that can vary quite a lot depending on the exact nature of the dataset we'll also need to run several times on randomly generated datasets of each size so as to get a better idea of the average case performance.
We also need to generalise over algorithms which don't necessarily all have the same API. We can resolve that by taking a clustering function, argument tuple and keywords dictionary to let us do semi-arbitrary calls (fortunately all the algorithms do at least take the dataset to cluster as the first parameter).
Finally some algorithms scale poorly, and I don't want to spend forever doing clustering of random datasets so we'll cap the maximum time an algorithm can use; once it has taken longer than max time we'll just abort there and leave the remaining entries in our datasize by samples matrix unfilled.
In the end this all amounts to a fairly straightforward set of nested loops (over datasizes and number of samples) with calls to sklearn to generate mock data and the clustering function inside a timer. Add in some early abort and we're done.
End of explanation
"""
dataset_sizes = np.hstack([np.arange(1, 6) * 500, np.arange(3,7) * 1000, np.arange(4,17) * 2000])
"""
Explanation: Comparison of all ten implementations
Now we need a range of dataset sizes to test out our algorithm. Since the scaling performance is wildly different over the ten implementations we're going to look at, it will be beneficial to have a number of very small dataset sizes, and increasing spacing as we get larger, spanning out to 32000 datapoints to cluster (to begin with). Numpy provides convenient ways to get this done via arange and vector multiplication. We'll start with step sizes of 500, then shift to steps of 1000 past 3000 datapoints, and finally steps of 2000 past 6000 datapoints.
End of explanation
"""
k_means = sklearn.cluster.KMeans(10)
k_means_data = benchmark_algorithm(dataset_sizes, k_means.fit, (), {})
dbscan = sklearn.cluster.DBSCAN(eps=1.25)
dbscan_data = benchmark_algorithm(dataset_sizes, dbscan.fit, (), {})
scipy_k_means_data = benchmark_algorithm(dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {})
scipy_single_data = benchmark_algorithm(dataset_sizes,
scipy.cluster.hierarchy.single, (), {})
fastclust_data = benchmark_algorithm(dataset_sizes,
fastcluster.linkage_vector, (), {})
hdbscan_ = hdbscan.HDBSCAN()
hdbscan_data = benchmark_algorithm(dataset_sizes, hdbscan_.fit, (), {})
debacl_data = benchmark_algorithm(dataset_sizes,
debacl.geom_tree.geomTree, (5, 5), {'verbose':False})
agglomerative = sklearn.cluster.AgglomerativeClustering(10)
agg_data = benchmark_algorithm(dataset_sizes,
agglomerative.fit, (), {}, sample_size=4)
spectral = sklearn.cluster.SpectralClustering(10)
spectral_data = benchmark_algorithm(dataset_sizes,
spectral.fit, (), {}, sample_size=6)
affinity_prop = sklearn.cluster.AffinityPropagation()
ap_data = benchmark_algorithm(dataset_sizes,
affinity_prop.fit, (), {}, sample_size=3)
"""
Explanation: Now it is just a matter of running all the clustering algorithms via our benchmark function to collect up all the requisite data. This could be prettier, rolled up into functions appropriately, but sometimes brute force is good enough. More importantly (for me), since this can take a significant amount of compute time, I wanted to be able to easily comment out algorithms that were slow or that I was uninterested in. Which brings me to a warning for you the reader and potential user of the notebook: this next step is very expensive. We are running ten different clustering algorithms multiple times each on twenty two different dataset sizes -- and some of the clustering algorithms are slow (we are capping out at forty five seconds per run). That means that the next cell can take an hour or more to run. That doesn't mean "Don't try this at home" (I actually encourage you to try this out yourself and play with dataset parameters and clustering parameters) but it does mean you should be patient if you're going to!
End of explanation
"""
sns.regplot(x='x', y='y', data=k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=hdbscan_data, order=2,
label='HDBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=scipy_single_data, order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=debacl_data, order=2,
label='DeBaCl Geom Tree', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=spectral_data, order=2,
label='Sklearn Spectral', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=agg_data, order=2,
label='Sklearn Agglomerative', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=ap_data, order=2,
label='Sklearn Affinity Propagation', x_estimator=np.mean)
plt.gca().axis([0, 34000, 0, 120])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Clustering Implementations')
plt.legend()
"""
Explanation: Now we need to plot the results so we can see what is going on. The catch is that we have several datapoints for each dataset size and ultimately we would like to try and fit a curve through all of it to get the general scaling trend. Fortunately seaborn comes to the rescue here by providing regplot which plots a regression through a dataset, supports higher order regression (we should probably use order two as most algorithms are effectively quadratic) and handles multiple datapoints for each x-value cleanly (using the x_estimator keyword to put a point at the mean and draw an error bar to cover the range of data).
End of explanation
"""
large_dataset_sizes = np.arange(1,16) * 4000
hdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')
large_hdbscan_boruvka_data = benchmark_algorithm(large_dataset_sizes,
hdbscan_boruvka.fit, (), {},
max_time=90, sample_size=1)
k_means = sklearn.cluster.KMeans(10)
large_k_means_data = benchmark_algorithm(large_dataset_sizes,
k_means.fit, (), {},
max_time=90, sample_size=1)
dbscan = sklearn.cluster.DBSCAN(eps=1.25, min_samples=5)
large_dbscan_data = benchmark_algorithm(large_dataset_sizes,
dbscan.fit, (), {},
max_time=90, sample_size=1)
large_fastclust_data = benchmark_algorithm(large_dataset_sizes,
fastcluster.linkage_vector, (), {},
max_time=90, sample_size=1)
large_scipy_k_means_data = benchmark_algorithm(large_dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {},
max_time=90, sample_size=1)
large_scipy_single_data = benchmark_algorithm(large_dataset_sizes,
scipy.cluster.hierarchy.single, (), {},
max_time=90, sample_size=1)
"""
Explanation: A few features stand out. First of all there appear to be essentially two classes of implementation, with DeBaCl being an odd case that falls in the middle. The fast implementations tend to be implementations of single linkage agglomerative clustering, K-means, and DBSCAN. The slow cases are largely from sklearn and include agglomerative clustering (in this case using Ward instead of single linkage).
For practical purposes this means that if you have much more than 10000 datapoints your clustering options are significantly constrained: sklearn spectral, agglomerative and affinity propagation are going to take far too long. DeBaCl may still be an option, but given that the hdbscan library provides "robust single linkage clustering" equivalent to what DeBaCl is doing (and with effectively the same runtime as hdbscan as it is a subset of that algorithm) it is probably not the best choice for large dataset sizes.
So let's drop out those slow algorithms so we can scale out a little further and get a closer look at the various algorithms that managed 32000 points in under thirty seconds. There is almost undoubtedly more to learn as we get ever larger dataset sizes.
Comparison of fast implementations
Let's compare the six fastest implementations now. We can scale out a little further as well; based on the curves above it looks like we should be able to comfortably get to 60000 data points without taking much more than a minute per run. We can also note that most of these implementations weren't that noisy so we can get away with a single run per dataset size.
End of explanation
"""
sns.regplot(x='x', y='y', data=large_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2,
label='HDBSCAN Boruvka', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_single_data, order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
plt.gca().axis([0, 64000, 0, 150])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Fastest Clustering Implementations')
plt.legend()
"""
Explanation: Again we can use seaborn to do curve fitting and plotting, exactly as before.
End of explanation
"""
large_scipy_single_data.tail(10)
"""
Explanation: Clearly something has gone woefully wrong with the curve fitting for the scipy single linkage implementation, but what exactly? If we look at the raw data we can see.
End of explanation
"""
size_of_array = 44000 * (44000 - 1) / 2 # from pdist documentation
bytes_in_array = size_of_array * 8 # Since doubles use 8 bytes
gigabytes_used = bytes_in_array / (1024.0 ** 3) # divide out to get the number of GB
gigabytes_used
"""
Explanation: It seems that at around 44000 points we hit a wall and the runtimes spiked. A hint is that I'm running this on a laptop with 8GB of RAM. Both single linkage algorithms use scipy.spatial.pdist to compute pairwise distances between points, which returns an array of shape (n(n-1)/2, 1) of doubles. A quick computation shows that that array of distances is quite large once we have 44000 points:
End of explanation
"""
sns.regplot(x='x', y='y', data=large_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_hdbscan_boruvka_data, order=2,
label='HDBSCAN Boruvka', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_fastclust_data, order=2,
label='Fastcluster Single Linkage', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=large_scipy_single_data[:8], order=2,
label='Scipy Single Linkage', x_estimator=np.mean)
plt.gca().axis([0, 64000, 0, 150])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of Fastest Clustering Implementations')
plt.legend()
"""
Explanation: If we assume that my laptop is keeping anything much in RAM other than that distance array, then clearly we are going to spend time paging the distance array out to disk and back, and hence we will see the runtimes increase dramatically as we become disk IO bound. If we just leave off the last element we can get a better idea of the curve, but keep in mind that the scipy single linkage implementation does not scale past a limit set by your available RAM.
End of explanation
"""
huge_dataset_sizes = np.arange(1,11) * 20000
k_means = sklearn.cluster.KMeans(10)
huge_k_means_data = benchmark_algorithm(huge_dataset_sizes,
k_means.fit, (), {},
max_time=120, sample_size=2, dataset_dimension=10)
dbscan = sklearn.cluster.DBSCAN(eps=1.5)
huge_dbscan_data = benchmark_algorithm(huge_dataset_sizes,
dbscan.fit, (), {},
max_time=120, sample_size=2, dataset_dimension=10)
huge_scipy_k_means_data = benchmark_algorithm(huge_dataset_sizes,
scipy.cluster.vq.kmeans, (10,), {},
max_time=120, sample_size=2, dataset_dimension=10)
hdbscan_boruvka = hdbscan.HDBSCAN(algorithm='boruvka_kdtree')
huge_hdbscan_data = benchmark_algorithm(huge_dataset_sizes,
hdbscan_boruvka.fit, (), {},
max_time=240, sample_size=4, dataset_dimension=10)
huge_fastcluster_data = benchmark_algorithm(huge_dataset_sizes,
fastcluster.linkage_vector, (), {},
max_time=240, sample_size=2, dataset_dimension=10)
sns.regplot(x='x', y='y', data=huge_k_means_data, order=2,
label='Sklearn K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_dbscan_data, order=2,
label='Sklearn DBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_scipy_k_means_data, order=2,
label='Scipy K-Means', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_hdbscan_data, order=2,
label='HDBSCAN', x_estimator=np.mean)
sns.regplot(x='x', y='y', data=huge_fastcluster_data, order=2,
label='Fastcluster', x_estimator=np.mean)
plt.gca().axis([0, 200000, 0, 240])
plt.gca().set_xlabel('Number of data points')
plt.gca().set_ylabel('Time taken to cluster (s)')
plt.title('Performance Comparison of K-Means and DBSCAN')
plt.legend()
"""
Explanation: If we're looking for scaling we can write off the scipy single linkage implementation -- even if we didn't hit the RAM limit, the $O(n^2)$ scaling is going to quickly catch up with us. Fastcluster has the same asymptotic scaling, but is heavily optimized to bring the constant down much lower -- at this point it is still keeping close to the faster algorithms. Its asymptotics will still catch up with it eventually, however.
In practice this is going to mean that for larger datasets you are going to be very constrained in what algorithms you can apply: if you get enough datapoints only K-Means, DBSCAN, and HDBSCAN will be left. This is somewhat disappointing, particularly as K-Means is not an especially good clustering algorithm, particularly for exploratory data analysis.
With this in mind it is worth looking at how these last several implementations perform at much larger sizes, to see, for example, when fastcluster's asymptotic complexity starts to pull it away.
Comparison of high performance implementations
At this point we can scale out to 200000 datapoints easily enough, so let's push things at least that far so we can start to really see scaling effects.
End of explanation
"""
import statsmodels.formula.api as sm
time_samples = [1000, 2000, 5000, 10000, 25000, 50000, 75000, 100000, 250000, 500000, 750000,
1000000, 2500000, 5000000, 10000000, 50000000, 100000000, 500000000, 1000000000]
def get_timing_series(data, quadratic=True):
if quadratic:
data['x_squared'] = data.x**2
model = sm.ols('y ~ x + x_squared', data=data).fit()
predictions = [model.params.dot([1.0, i, i**2]) for i in time_samples]
return pd.Series(predictions, index=pd.Index(time_samples))
else: # assume n log(n)
data['xlogx'] = data.x * np.log(data.x)
model = sm.ols('y ~ x + xlogx', data=data).fit()
predictions = [model.params.dot([1.0, i, i*np.log(i)]) for i in time_samples]
return pd.Series(predictions, index=pd.Index(time_samples))
"""
Explanation: Now some differences become clear. The asymptotic complexity starts to kick in, with fastcluster failing to keep up. In turn HDBSCAN and DBSCAN, while having sub-$O(n^2)$ complexity, can't achieve $O(n \log(n))$ at this dataset dimension, and start to curve upward precipitously. Finally it demonstrates again how much of a difference implementation can make: the sklearn implementation of K-Means is far better than the scipy implementation. Since HDBSCAN clustering is a lot better than K-Means (unless you have good reasons to assume that the clusters partition your data and are all drawn from Gaussian distributions) and the scaling is still pretty good, I would suggest that unless you have a truly stupendous amount of data you wish to cluster, the HDBSCAN implementation is a good choice.
But should I get a coffee?
So we know which implementations scale and which don't; a more useful thing to know in practice is, given a dataset, what can I run interactively? What can I run while I go and grab some coffee? How about a run over lunch? What if I'm willing to wait until I get in tomorrow morning? Each of these represents a significant break in productivity -- once you aren't working interactively anymore your productivity drops measurably, and so on.
We can build a table for this. To start we'll need to be able to approximate how long a given clustering implementation will take to run. Fortunately we already gathered a lot of that data; if we load up the statsmodels package we can fit the data (with a quadratic or $n\log n$ fit depending on the implementation; DBSCAN and HDBSCAN get caught here, since while they are under $O(n^2)$ scaling, they don't have an easily described model, so I'll model them as $n^2$ for now) and use the resulting model to make our predictions. Obviously this has some caveats: if you fill your RAM with a distance matrix your runtime isn't going to fit the curve.
I've hand built a time_samples list to give a reasonable set of potential data sizes that are nice and human readable. After that we just need a function to fit and build the curves.
End of explanation
"""
ap_timings = get_timing_series(ap_data)
spectral_timings = get_timing_series(spectral_data)
agg_timings = get_timing_series(agg_data)
debacl_timings = get_timing_series(debacl_data)
fastclust_timings = get_timing_series(large_fastclust_data.loc[:10, :].copy())
scipy_single_timings = get_timing_series(large_scipy_single_data.loc[:10, :].copy())
hdbscan_boruvka = get_timing_series(huge_hdbscan_data, quadratic=True)
#scipy_k_means_timings = get_timing_series(huge_scipy_k_means_data, quadratic=False)
dbscan_timings = get_timing_series(huge_dbscan_data, quadratic=True)
k_means_timings = get_timing_series(huge_k_means_data, quadratic=False)
timing_data = pd.concat([ap_timings, spectral_timings, agg_timings, debacl_timings,
scipy_single_timings, fastclust_timings, hdbscan_boruvka,
dbscan_timings, k_means_timings
], axis=1)
timing_data.columns=['AffinityPropagation', 'Spectral', 'Agglomerative',
'DeBaCl', 'ScipySingleLinkage', 'Fastcluster',
'HDBSCAN', 'DBSCAN', 'SKLearn KMeans'
]
def get_size(series, max_time):
return series.index[series < max_time].max()
datasize_table = pd.concat([
timing_data.apply(get_size, max_time=30),
timing_data.apply(get_size, max_time=300),
timing_data.apply(get_size, max_time=3600),
timing_data.apply(get_size, max_time=8*3600)
], axis=1)
datasize_table.columns=('Interactive', 'Get Coffee', 'Over Lunch', 'Overnight')
datasize_table
"""
Explanation: Now we run that for each of our pre-existing datasets to extrapolate out predicted performance on the relevant dataset sizes. A little pandas wrangling later and we've produced a table of roughly how large a dataset you can tackle in each time frame with each implementation. I had to leave out the scipy KMeans timings because the noise in timing results caused the model to be unrealistic at larger data sizes. Note how the $O(n\log n)$ algorithms utterly dominate here. In the meantime, for medium sizes data sets you can still get quite a lot done with HDBSCAN.
End of explanation
"""
|
srcole/qwm
|
burrito/Burrito_Rankings.ipynb
|
mit
|
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style("white")
"""
Explanation: San Diego Burrito Analytics: Rankings
Scott Cole
21 May 2016
This notebook ranks each taco shop along each dimension
imports
End of explanation
"""
import util
df = util.load_burritos()
N = df.shape[0]
"""
Explanation: Load data
End of explanation
"""
m_Location = ['Location','N','Yelp','Google','Hunger','Cost','Volume','Tortilla','Temp','Meat','Fillings','Meat:filling',
'Uniformity','Salsa','Synergy','Wrap','overall']
# Calculate the mean of each of the metrics above for each taco shop
tacoshops = df.Location.unique()
TS = len(tacoshops)
dfmean = pd.DataFrame(np.nan, index=range(TS), columns=m_Location)
for ts in range(TS):
    dfmean.loc[ts] = df.loc[df.Location == tacoshops[ts]].mean(numeric_only=True)
    dfmean.loc[ts, 'N'] = (df.Location == tacoshops[ts]).sum()
dfmean.Location = tacoshops
Ncutoff = 5
dfToRank = dfmean.loc[dfmean.N>=Ncutoff]
dfToRank
m_Rank = ['Location','Cost','Volume','Tortilla','Temp','Meat','Fillings','Meat:filling', 'Uniformity','Salsa','Synergy','Wrap','overall']
TS = len(dfToRank)
dfRanked = pd.DataFrame(np.nan, index=range(TS), columns=m_Rank)
dfRanked['Location'] = dfToRank['Location'].values
for m in m_Rank[1:]:
if m == 'Cost':
        dfRanked[m] = dfToRank[m].rank(ascending=True).values
    else:
        dfRanked[m] = dfToRank[m].rank(ascending=False).values
dfRanked
"""
Explanation: Average each metric over each Location
End of explanation
"""
|
vangj/py-bbn
|
jupyter/some-features.ipynb
|
apache-2.0
|
import json
from pybbn.graph.variable import Variable
from pybbn.graph.node import BbnNode
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.dag import Bbn
a = BbnNode(Variable(0, 'a', ['t', 'f']), [0.2, 0.8])
b = BbnNode(Variable(1, 'b', ['t', 'f']), [0.1, 0.9, 0.9, 0.1])
bbn = Bbn().add_node(a).add_node(b) \
.add_edge(Edge(a, b, EdgeType.DIRECTED))
# serialize to JSON file
s = json.dumps(Bbn.to_dict(bbn))
with open('simple-bbn.json', 'w') as f:
f.write(s)
print(bbn)
"""
Explanation: Showing some additional features
Serialization-deserialization (serde) a BBN
Let's serialize (save) a BBN and deserialize it for later use. The BBN is a simple two node network $a \rightarrow b$.
Serializing
End of explanation
"""
# deserialize from JSON file
with open('simple-bbn.json', 'r') as f:
d = json.loads(f.read())
bbn = Bbn.from_dict(d)
print(bbn)
"""
Explanation: Deserializing
End of explanation
"""
from pybbn.pptc.inferencecontroller import InferenceController
from pybbn.graph.jointree import JoinTree
a = BbnNode(Variable(0, 'a', ['t', 'f']), [0.2, 0.8])
b = BbnNode(Variable(1, 'b', ['t', 'f']), [0.1, 0.9, 0.9, 0.1])
bbn = Bbn().add_node(a).add_node(b) \
.add_edge(Edge(a, b, EdgeType.DIRECTED))
jt = InferenceController.apply(bbn)
with open('simple-join-tree.json', 'w') as f:
d = JoinTree.to_dict(jt)
j = json.dumps(d, sort_keys=True, indent=2)
f.write(j)
print(jt)
"""
Explanation: Serde a join tree
Serializing
End of explanation
"""
with open('simple-join-tree.json', 'r') as f:
j = f.read()
d = json.loads(j)
jt = JoinTree.from_dict(d)
jt = InferenceController.apply_from_serde(jt)
print(jt)
"""
Explanation: Deserializing
End of explanation
"""
# you have built a BBN
a = BbnNode(Variable(0, 'a', ['t', 'f']), [0.2, 0.8])
b = BbnNode(Variable(1, 'b', ['t', 'f']), [0.1, 0.9, 0.9, 0.1])
bbn = Bbn().add_node(a).add_node(b) \
.add_edge(Edge(a, b, EdgeType.DIRECTED))
# you have built a junction tree from the BBN
# let's call this "original" junction tree the left-hand side (lhs) junction tree
lhs_jt = InferenceController.apply(bbn)
# you may just update the CPTs with the original junction tree structure
# the algorithm to find/build the junction tree is avoided
# the CPTs are updated
rhs_jt = InferenceController.reapply(lhs_jt, {0: [0.3, 0.7], 1: [0.2, 0.8, 0.8, 0.2]})
# let's print out the marginal probabilities and see how things changed
# print the marginal probabilities for the lhs junction tree
print('lhs probabilities')
for node in lhs_jt.get_bbn_nodes():
potential = lhs_jt.get_bbn_potential(node)
print(node)
print(potential)
print('>')
# print the marginal probabilities for the rhs junction tree
print('rhs probabilities')
for node in rhs_jt.get_bbn_nodes():
potential = rhs_jt.get_bbn_potential(node)
print(node)
print(potential)
print('>')
"""
Explanation: Updating the conditional probability tables (CPTs) of a BBN nodes in a junction tree
Sometimes you may want to update the CPTs of the BBN nodes in a junction tree; the junction tree may be expensive to build for large graphs, and you want to avoid rebuilding it since doing so is a computationally expensive procedure. Below, we show how to reuse a junction tree with updated CPTs.
End of explanation
"""
|
probml/pyprobml
|
notebooks/book1/09/naive_bayes_mnist_jax.ipynb
|
mit
|
import numpy as np
try:
import torchvision
except ModuleNotFoundError:
%pip install -qq torchvision
import torchvision
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
!mkdir figures # for saving plots
key = jax.random.PRNGKey(1)
# helper function to show images
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
# modified from https://raw.githubusercontent.com/d2l-ai/d2l-en/master/d2l/torch.py
figsize = (num_cols * scale, num_rows * scale)
_, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
img = np.array(img)
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
"""
Explanation: Please find the torch implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/09/naive_bayes_mnist_torch.ipynb
<a href="https://colab.research.google.com/github/arpitvaghela/probml-notebooks/blob/main/notebooks-d2l/naive_bayes_mnist_jax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Naive Bayes classifiers
We show how to implement Naive Bayes classifiers from scratch.
We use binary features, and 2 classes.
Based on sec 18.9 of http://d2l.ai/chapter_appendix-mathematics-for-deep-learning/naive-bayes.html.
End of explanation
"""
mnist_train = torchvision.datasets.MNIST(
root="./temp",
train=True,
transform=lambda x: jnp.array([jnp.array(x) / 255]),
download=True,
)
mnist_test = torchvision.datasets.MNIST(
root="./temp",
train=False,
transform=lambda x: jnp.array([jnp.array(x) / 255]),
download=True,
)
print(mnist_train)
image, label = mnist_train[2]
print(type(image))
print(image.shape)
print(type(label))
print(label)
image[0, 15:20, 15:20] # not binary (pytorch rescales to 0:1)
[jnp.min(image), jnp.max(image)]
print(mnist_train[0][0].shape) # (1,28,28)
indices = [0, 1]
xx = jnp.stack([mnist_train[i][0] for i in indices])
print(xx.shape)
xx = jnp.stack([mnist_train[i][0] for i in indices], axis=1)
print(xx.shape)
xx = jnp.stack([mnist_train[i][0] for i in indices], axis=1).squeeze(0)
print(xx.shape)
# convert from torch.tensor to numpy, extract subset of indices, optionally binarize
def get_data(data, indices=None, binarize=True):
N = len(data)
if indices is None:
indices = range(0, N)
X = jnp.stack([data[i][0] for i in indices], axis=1).squeeze(0) # (N,28,28)
if binarize:
X = X > 0.5
y = jnp.array([data[i][1] for i in indices])
return X, y
indices = range(0, 10)
images, labels = get_data(mnist_train, indices, False)
print([images.shape, labels.shape])
print(images[0, 15:20, 15:20]) # not binary
_ = show_images(images, 1, 10)
indices = range(0, 10)
images, labels = get_data(mnist_train, indices, True)
print([images.shape, labels.shape])
print(images[0, 15:20, 15:20]) # binary
_ = show_images(images, 1, 10)
X_train, y_train = get_data(mnist_train)
X_test, y_test = get_data(mnist_test)
print(X_train.shape)
print(type(X_train))
print(X_train[0, 15:20, 15:20])
"""
Explanation: Get data
We use a binarized version of MNIST.
End of explanation
"""
n_y = jnp.zeros(10)
for y in range(10):
n_y = n_y.at[y].set((y_train == y).sum())
P_y = n_y / n_y.sum()
P_y
# Training set is not equally balanced across classes...
print(jnp.unique(y_train))
from collections import Counter
cnt = Counter(np.asarray(y_train))
print(cnt.keys())
print(cnt.values())
"""
Explanation: Training
End of explanation
"""
n_x = jnp.zeros((10, 28, 28))
for y in range(10):
n_x = n_x.at[y].set(X_train[y_train == y].sum(axis=0))
# using pseudo counts of 1
# P_xy = (n_x + 1) / (n_y + 1).reshape(10, 1, 1)
P_xy = (n_x + 1) / (n_y + 2).reshape(10, 1, 1)
print(P_xy.shape)
print(type(P_xy))
show_images(P_xy, 1, 10)
plt.tight_layout()
plt.savefig("nbc_mnist_centroids.pdf", dpi=300)
"""
Explanation: We use add-one smoothing for class conditional Bernoulli distributions.
End of explanation
"""
log_P_xy = jnp.log(P_xy)
log_P_xy_neg = jnp.log(1 - P_xy)
log_P_y = jnp.log(P_y)
def bayes_pred_stable(x):
# x = x.unsqueeze(0) # (28, 28) -> (1, 28, 28)
x = jnp.expand_dims(x, 0) # (28, 28) -> (1, 28, 28)
p_xy = log_P_xy * x + log_P_xy_neg * (1 - x) # select the 0 and 1 pixels
p_xy = p_xy.reshape(10, -1).sum(axis=1) # p(x|y)
return p_xy + log_P_y
def predict(X):
return jnp.array([jnp.argmax(bayes_pred_stable(x)) for x in X])
# image, label = mnist_test[0]
image = X_test[0]
label = y_test[0]
py = bayes_pred_stable(image)
print(py)
print("ytrue ", label, "yhat ", np.argmax(py))
print(predict([image]))
indices = range(0, 10)
X1, y1 = get_data(mnist_test, indices, True)
preds = predict(X1)
show_images(X1, 1, 10, titles=[str(d) for d in preds])
plt.tight_layout()
plt.savefig("nbc_mnist_preds.pdf", dpi=300)
indices = range(5, 10)
X1, y1 = get_data(mnist_test, indices, True)
preds = predict(X1)
show_images(X1, 1, 5, titles=[str(d) for d in preds])
plt.tight_layout()
plt.savefig("nbc_mnist_preds.pdf", dpi=300)
indices = range(30, 40)
X1, y1 = get_data(mnist_test, indices, True)
preds = predict(X1)
_ = show_images(X1, 1, 10, titles=[str(d) for d in preds])
preds = predict(X_test)
float(jnp.count_nonzero(preds == y_test)) / len(y_test) # test accuracy
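# (Added sketch, not part of the original notebook) The per-example Python loop in
# predict() can be replaced by a single matrix product: since
# log p(x, y) = log P_y + sum_i [ x_i log theta_{y,i} + (1 - x_i) log(1 - theta_{y,i}) ],
# the class scores are an affine function of the flattened pixels.
def predict_batch(X):
    Xf = jnp.asarray(X, dtype=jnp.float32).reshape(len(X), -1) # (N, 784)
    W = (log_P_xy - log_P_xy_neg).reshape(10, -1) # (10, 784)
    b = log_P_y + log_P_xy_neg.reshape(10, -1).sum(axis=1) # (10,)
    return jnp.argmax(Xf @ W.T + b, axis=1)
preds_batch = predict_batch(X_test)
float(jnp.count_nonzero(preds_batch == y_test)) / len(y_test) # should agree with the loop version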
"""
Explanation: Testing
End of explanation
"""
|
datascience-practice/data-quest
|
python_introduction/intermediate/indexing-and-more-functions.ipynb
|
mit
|
x = 3
# The loop body will execute three times. Once when x == 3, once when x == 4, and once when x == 5.
# Then x < 6 will evaluate to False, and it will stop.
# 3, 4, and 5 will be printed out.
while x < 6:
print(x)
# Using += is a shorter way of saying x = x + 1. It will add one to x.
x += 1
b = 10
"""
Explanation: 2: Writing a while loop
Instructions
Create a while loop that tests if b is greater than 5. If it is, the loop body should print b out, then subtract one from it.
End of explanation
"""
while b > 5:
print(b)
b -= 1
"""
Explanation: Answer
End of explanation
"""
available_count = 0
desired_dog = "Great Dane"
available_dogs = ["Labrador", "Poodle", "Sheepdog", "Great Dane", "Pomeranian", "Great Dane", "Collie"]
# Let's say we are searching for two dogs of the same breed to adopt.
# We'll loop through the dogs.
for dog in available_dogs:
# If our desired dog is found.
if dog == desired_dog:
# Increment the counter.
available_count += 1
# We only want two dogs, so we can stop searching after we find them.
if available_count == 2:
break
tiger_count = 0
desired_tiger = "Bengal"
available_tigers = ["Bengal", "Dressed up poodle", "Siberian", "Sumatran", "Bengal", "Housecat", "Hobbes"]
"""
Explanation: 3: Using the break keyword
Instructions
Let's say we want two "Bengal" tigers from available_tigers for our nature reserve.
Write a for loop that increments tiger_count when it finds one, and breaks after finding two.
End of explanation
"""
for t in available_tigers:
if t == "Bengal":
tiger_count += 1
if tiger_count == 2:
break
"""
Explanation: Answer
End of explanation
"""
column_names = ['year',
'month',
'carrier',
'carrier_name',
'airport',
'airport_name',
'arr_flights',
'arr_del15',
'carrier_ct',
'weather_ct',
'nas_ct',
'security_ct',
'late_aircraft_ct',
'arr_cancelled',
'arr_diverted',
'arr_delay',
'carrier_delay',
'weather_delay',
'nas_delay',
'security_delay',
'late_aircraft_delay']
# It's pretty easy to get a column name from a column number.
# The third column contains the carrier (same as the airline).
print(column_names[2])
"""
Explanation: 4: Finding a column number from a name
Instructions
Write a function that will get the column number from the column name.
Use it to get the column number for the "arr_delay" column and assign it to the arr_delay variable.
Use it to get the column number for the "weather_delay" column and assign it to the weather_delay variable.
End of explanation
"""
def number_by_name(name):
found = None
for i, nm in enumerate(column_names):
if nm == name:
found = i
break
return found
arr_delay = number_by_name("arr_delay")
weather_delay = number_by_name("weather_delay")
print(arr_delay, weather_delay)
"""
Explanation: Answer
End of explanation
"""
from flight_delays import flight_delays
# Prints the last row in flight_delays
print(flight_delays[-1])
# Prints the second to last row in flight_delays
print(flight_delays[-2])
# Prints the third to last and second to last rows in flight_delays (remember that slicing only goes up to but not including the second number)
# This will get the rows at index -3 and -2
print(flight_delays[-3:-1])
"""
Explanation: 5: Using negative indexing
Instructions
Use negative indexing to assign the third to last row in flight_delays to third_to_last.
Use negative slicing to assign the fourth, third, and second to last rows in flight_delays to end_slice
End of explanation
"""
third_to_last = flight_delays[-3]
end_slice = flight_delays[-4:-1]
print(third_to_last, end_slice)
"""
Explanation: Answer
End of explanation
"""
# Leaving the first number in the slice blank means "start from the beginning of the list, inclusive"
# This code will get the rows at index 0, 1, 2, 3, and 4.
first_five_rows = flight_delays[:5]
# We can also leave the last number blank to get all rows up to and including the last one.
# This will get the rows at index -5, -4, -3, -2, and -1
last_five_rows = flight_delays[-5:]
"""
Explanation: 6: Indexing up to the end or from the beginning
Instructions
Assign the first 10 rows of flight_delays to first_ten_rows.
Assign the last 10 rows of flight_delays to last_ten_rows.
End of explanation
"""
first_ten_rows = flight_delays[:10]
last_ten_rows = flight_delays[-10:]
print(first_ten_rows, last_ten_rows)
"""
Explanation: Answer
End of explanation
"""
def column_number_from_name(column_name):
column_number = None
for i, column in enumerate(column_names):
if column == column_name:
            column_number = i
return column_number
# Get the column number of the arr_flight column
# This column counts the total number of arriving flights for a carrier in a given airport
arr_flights_column = column_number_from_name("arr_flights")
# Extract all of the values in the column using a list comprehension
# We need to convert the values to float because they are strings initially
arr_flights = [float(row[arr_flights_column]) for row in flight_delays]
# Now we can use the sum() function to add together all of the values.
total_arriving_flights = sum(arr_flights)
"""
Explanation: 7: Finding the percentage of delayed flights
Instructions
Sum together the values in the "arr_del15" column. This is the total number of arriving flights in each airport that were delayed more than 15 minutes.
Then, divide the number of delayed flights by total_arriving_flights. Assign the result to delayed_percentage.
End of explanation
"""
arr_del15 = [float(row[column_number_from_name("arr_del15")]) for row in flight_delays]
print(arr_del15)
total_arr_del15 = sum(arr_del15)
delayed_percentage = total_arr_del15/total_arriving_flights
print(delayed_percentage)
"""
Explanation: Answer
End of explanation
"""
def column_number_from_name(column_name):
column_number = None
for i, column in enumerate(column_names):
if column == column_name:
column_number = i
return column_number
average_delay_time = None
"""
Explanation: 8: Finding the average delay time
Instructions
Find the sum of the "arr_delay" column.
Then, divide it by the sum of the "arr_del15" column to get the average number of minutes a plane was delayed.
Assign the result to average_delay_time.
End of explanation
"""
sum_arr_delay = sum([float(row[column_number_from_name("arr_delay")]) for row in flight_delays])
arr_del15 = [float(row[column_number_from_name("arr_del15")]) for row in flight_delays]
sum_arr_del15 = sum(arr_del15)
average_delay_time = sum_arr_delay/sum_arr_del15
print(average_delay_time)
"""
Explanation: Answer
End of explanation
"""
def column_number_from_name(column_name):
column_number = None
for i, column in enumerate(column_names):
if column == column_name:
column_number = i
return column_number
def sum_column(column_name):
column_number = column_number_from_name(column_name)
column = [float(_[column_number]) for _ in flight_delays]
return sum(column)
weather_delay_sum = sum_column("weather_delay")
arr_del15_sum = sum_column("arr_del15")
average_weather_delay_time = weather_delay_sum/arr_del15_sum
print(average_weather_delay_time)
"""
Explanation: 9: Making a function to calculate the delay
Instructions
Make a function that takes a column name as input, and returns the column sum.
Then use the function to take the sum of the "weather_delay" column, and divide it by the sum of the "arr_del15" column.
Assign the result to average_weather_delay_time.
End of explanation
"""
def divide(x=1, y=1):
return x/y
# Use positional arguments, which will implicitly pass 10 to x and 5 to y.
print(divide(10,5))
# Use named arguments, which will pass the values to the named variable.
print(divide(y=10, x=5))
# If we use named arguments, the order doesn't matter
print(divide(x=5, y=10))
# But we can't have any positional arguments after we use a named argument
print(divide(5, 20))
print(divide(100, 30))
"""
Explanation: 11: Named arguments to functions
Instructions
Fix the statements above so the code runs properly.
The first statement should divide 5 by 20, and the second should divide 100 by 30.
End of explanation
"""
print(divide(5, y=20))
print(divide(100, 30))
"""
Explanation: Answer
End of explanation
"""
def multiply(a, b=2, c=1):
return a * b * c
# This will multiply 5 * 2 * 1
print(multiply(5))
# This will multiply 6 * 4 * 1
print(multiply(5, 4))
# This will multiply 5 * 2 * 1
print(multiply(a=5))
# This will multiply 6 * 2 * 4
print(multiply(a=6, c=4))
# Invalid, because we didn't fill the a variable, which doesn't have a default.
print(multiply(4, b=3))
# Invalid, because we didn't fill the a variable.
print(multiply(c=3))
"""
Explanation: 12: Optional arguments to a function
Instructions
Fix the last two statements so that they work properly.
The first statement should multiply 4 * 3 * 1
The second statement should multiply 3 * 2 * 3
End of explanation
"""
|
AlJohri/DAT-DC-12
|
notebooks/intro-numpy.ipynb
|
mit
|
%matplotlib inline
import traceback
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Introduction to NumPy
Forked from Lecture 2 of Scientific Python Lectures by J.R. Johansson
End of explanation
"""
%%time
total = 0
for i in range(100000):
total += i
%%time
total = np.arange(100000).sum()
%%time
l = list(range(0, 1000000))
ltimes5 = [x * 5 for x in l]
%%time
l = np.arange(1000000)
ltimes5 = l * 5
"""
Explanation: Why NumPy?
End of explanation
"""
import numpy as np
"""
Explanation: Introduction
The numpy package (module) is used in almost all numerical computation using Python. It is a package that provides high-performance vector, matrix and higher-dimensional data structures for Python. It is implemented in C and Fortran so when calculations are vectorized (formulated with vectors and matrices), performance is very good.
To use numpy you need to import the module, using for example:
End of explanation
"""
# a vector: the argument to the array function is a Python list
v = np.array([1,2,3,4])
v
# a matrix: the argument to the array function is a nested Python list
M = np.array([[1, 2], [3, 4]])
M
"""
Explanation: In the numpy package the terminology used for vectors, matrices and higher-dimensional data sets is array.
Creating numpy arrays
There are a number of ways to initialize new numpy arrays, for example from
a Python list or tuples
using functions that are dedicated to generating numpy arrays, such as arange, linspace, etc.
reading data from files
From lists
For example, to create new vector and matrix arrays from Python lists we can use the numpy.array function.
End of explanation
"""
type(v), type(M)
"""
Explanation: The v and M objects are both of the type ndarray that the numpy module provides.
End of explanation
"""
v.shape
M.shape
"""
Explanation: The difference between the v and M arrays is only their shapes. We can get information about the shape of an array by using the ndarray.shape property.
End of explanation
"""
M.size
"""
Explanation: The number of elements in the array is available through the ndarray.size property:
End of explanation
"""
np.shape(M)
np.size(M)
"""
Explanation: Equivalently, we could use the functions numpy.shape and numpy.size
End of explanation
"""
M.dtype
"""
Explanation: So far the numpy.ndarray looks awfully much like a Python list (or nested list). Why not simply use Python lists for computations instead of creating a new array type?
There are several reasons:
Python lists are very general. They can contain any kind of object. They are dynamically typed. They do not support mathematical functions such as matrix and dot multiplications, etc. Implementing such functions for Python lists would not be very efficient because of the dynamic typing.
Numpy arrays are statically typed and homogeneous. The type of the elements is determined when the array is created.
Numpy arrays are memory efficient.
Because of the static typing, mathematical functions such as multiplication and addition of numpy arrays can be implemented efficiently in a compiled language (C and Fortran are used).
Using the dtype (data type) property of an ndarray, we can see what type the data of an array has:
End of explanation
"""
try:
M[0,0] = "hello"
except ValueError as e:
print(traceback.format_exc())
"""
Explanation: We get an error if we try to assign a value of the wrong type to an element in a numpy array:
End of explanation
"""
M = np.array([[1, 2], [3, 4]], dtype=complex)
M
"""
Explanation: If we want, we can explicitly define the type of the array data when we create it, using the dtype keyword argument:
End of explanation
"""
# create a range
x = np.arange(0, 10, 1) # arguments: start, stop, step
x
x = np.arange(-1, 1, 0.1)
x
"""
Explanation: Common data types that can be used with dtype are: int, float, complex, bool, object, etc.
We can also explicitly define the bit size of the data types, for example: int64, int16, float128, complex128.
Using array-generating functions
For larger arrays it is impractical to initialize the data manually, using explicit Python lists. Instead we can use one of the many functions in numpy that generate arrays of different forms. Some of the more common are:
arange
End of explanation
"""
# using linspace, both end points ARE included
np.linspace(0, 10, 25)
np.logspace(0, 10, 10, base=np.e)
"""
Explanation: linspace and logspace
End of explanation
"""
x, y = np.mgrid[0:5, 0:5] # similar to meshgrid in MATLAB
x
y
"""
Explanation: mgrid
End of explanation
"""
# uniform random numbers in [0,1]
np.random.rand(5,5)
# standard normal distributed random numbers
np.random.randn(5,5)
"""
Explanation: random data
End of explanation
"""
# a diagonal matrix
np.diag([1,2,3])
# diagonal with offset from the main diagonal
np.diag([1,2,3], k=1)
"""
Explanation: diag
End of explanation
"""
np.zeros((3,3))
np.ones((3,3))
"""
Explanation: zeros and ones
End of explanation
"""
!head ../data/stockholm_td_adj.dat
data = np.genfromtxt('../data/stockholm_td_adj.dat')
data.shape
fig, ax = plt.subplots(figsize=(14,4))
ax.plot(data[:,0]+data[:,1]/12.0+data[:,2]/365, data[:,5])
ax.axis('tight')
ax.set_title('temperatures in Stockholm')
ax.set_xlabel('year')
ax.set_ylabel('temperature (C)');
"""
Explanation: File I/O
Comma-separated values (CSV)
A very common file format for data files is comma-separated values (CSV), or related formats such as TSV (tab-separated values). To read data from such files into Numpy arrays we can use the numpy.genfromtxt function. For example,
End of explanation
"""
M = np.random.rand(3,3)
M
np.savetxt("../data/random-matrix.csv", M)
!cat ../data/random-matrix.csv
np.savetxt("../data/random-matrix.csv", M, fmt='%.5f') # fmt specifies the format
!cat ../data/random-matrix.csv
"""
Explanation: Using numpy.savetxt we can store a Numpy array to a file in CSV format:
End of explanation
"""
np.save("../data/random-matrix.npy", M)
!file ../data/random-matrix.npy
np.load("../data/random-matrix.npy")
"""
Explanation: Numpy's native file format
Useful when storing and reading back numpy array data. Use the functions numpy.save and numpy.load:
End of explanation
"""
M.itemsize # bytes per element
M.nbytes # number of bytes
M.ndim # number of dimensions
"""
Explanation: More properties of the numpy arrays
End of explanation
"""
# v is a vector, and has only one dimension, taking one index
v[0]
# M is a matrix, or a 2 dimensional array, taking two indices
M[1,1]
"""
Explanation: Manipulating arrays
Indexing
We can index elements in an array using square brackets and indices:
End of explanation
"""
M
M[1]
"""
Explanation: If we omit an index of a multidimensional array it returns the whole row (or, in general, an N-1 dimensional array)
End of explanation
"""
M[1,:] # row 1
M[:,1] # column 1
"""
Explanation: The same thing can be achieved by using : instead of an index:
End of explanation
"""
M[0,0] = 1
M
# also works for rows and columns
M[1,:] = 0
M[:,2] = -1
M
"""
Explanation: We can assign new values to elements in an array using indexing:
End of explanation
"""
A = np.array([1,2,3,4,5])
A
A[1:3]
"""
Explanation: Index slicing
Index slicing is the technical name for the syntax M[lower:upper:step] to extract part of an array:
End of explanation
"""
A[1:3] = [-2,-3]
A
"""
Explanation: Array slices are mutable: if they are assigned a new value the original array from which the slice was extracted is modified:
End of explanation
"""
A[::] # lower, upper, step all take the default values
A[::2] # step is 2, lower and upper defaults to the beginning and end of the array
A[:3] # first three elements
A[3:] # elements from index 3
"""
Explanation: We can omit any of the three parameters in M[lower:upper:step]:
End of explanation
"""
A = np.array([1,2,3,4,5])
A[-1] # the last element in the array
A[-3:] # the last three elements
"""
Explanation: Negative indices count from the end of the array (positive indices count from the beginning):
End of explanation
"""
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
A
# a block from the original array
A[1:4, 1:4]
# strides
A[::2, ::2]
"""
Explanation: Index slicing works exactly the same way for multidimensional arrays:
End of explanation
"""
row_indices = [1, 2, 3]
A[row_indices]
col_indices = [1, 2, -1] # remember, index -1 means the last element
A[row_indices, col_indices]
"""
Explanation: Fancy indexing
Fancy indexing is the name for when an array or list is used in place of an index:
End of explanation
"""
B = np.array([n for n in range(5)])
B
row_mask = np.array([True, False, True, False, False])
B[row_mask]
# same thing
row_mask = np.array([1,0,1,0,0], dtype=bool)
B[row_mask]
"""
Explanation: We can also use index masks: if the index mask is a Numpy array of data type bool, then an element is selected (True) or not (False) depending on the value of the index mask at the position of each element:
End of explanation
"""
x = np.arange(0, 10, 0.5)
x
mask = (5 < x) * (x < 7.5)
mask
x[mask]
"""
Explanation: This feature is very useful to conditionally select elements from an array, using for example comparison operators:
End of explanation
"""
indices = np.where(mask)
indices
x[indices] # this indexing is equivalent to the fancy indexing x[mask]
"""
Explanation: Functions for extracting data from arrays and creating arrays
where
The index mask can be converted to position index using the where function
End of explanation
"""
np.diag(A)
np.diag(A, -1)
"""
Explanation: diag
With the diag function we can also extract the diagonal and subdiagonals of an array:
End of explanation
"""
v2 = np.arange(-3,3)
v2
row_indices = [1, 3, 5]
v2[row_indices] # fancy indexing
v2.take(row_indices)
"""
Explanation: take
The take function is similar to fancy indexing described above:
End of explanation
"""
np.take([-3, -2, -1, 0, 1, 2], row_indices)
"""
Explanation: But take also works on lists and other objects:
End of explanation
"""
which = [1, 0, 1, 0]
choices = [[-2,-2,-2,-2], [5,5,5,5]]
np.choose(which, choices)
"""
Explanation: choose
Constructs an array by picking elements from several arrays:
End of explanation
"""
v1 = np.arange(0, 5)
v1 * 2
v1 + 2
A * 2, A + 2
"""
Explanation: Linear algebra
Vectorizing code is the key to writing efficient numerical calculations with Python/Numpy. That means that as much as possible of a program should be formulated in terms of matrix and vector operations, like matrix-matrix multiplication.
Scalar-array operations
We can use the usual arithmetic operators to multiply, add, subtract, and divide arrays with scalar numbers.
End of explanation
"""
A * A # element-wise multiplication
v1 * v1
"""
Explanation: Element-wise array-array operations
When we add, subtract, multiply and divide arrays with each other, the default behaviour is element-wise operations:
End of explanation
"""
A.shape, v1.shape
A * v1
"""
Explanation: If we multiply arrays with compatible shapes, we get an element-wise multiplication of each row:
End of explanation
"""
np.dot(A, A)
"""
Explanation: Matrix algebra
What about matrix multiplication? There are two ways. We can either use the dot function, which applies a matrix-matrix, matrix-vector, or inner vector multiplication to its two arguments:
End of explanation
"""
A @ A
np.dot(A, v1)
np.dot(v1, v1)
"""
Explanation: Python 3 has a new operator for using infix notation with matrix multiplication.
End of explanation
"""
M = np.matrix(A)
v = np.matrix(v1).T # make it a column vector
v
M * M
M * v
# inner product
v.T * v
# with matrix objects, standard matrix algebra applies
v + M*v
"""
Explanation: Alternatively, we can cast the array objects to the type matrix. This changes the behavior of the standard arithmetic operators +, -, * to use matrix algebra.
End of explanation
"""
v = np.matrix([1,2,3,4,5,6]).T
M.shape, v.shape
import traceback
try:
M * v
except ValueError as e:
print(traceback.format_exc())
"""
Explanation: If we try to add, subtract or multiply objects with incompatible shapes we get an error:
End of explanation
"""
C = np.matrix([[1j, 2j], [3j, 4j]])
C
np.conjugate(C)
"""
Explanation: See also the related functions: inner, outer, cross, kron, tensordot. Try for example help(np.kron).
Array/Matrix transformations
Above we have used the .T to transpose the matrix object v. We could also have used the transpose function to accomplish the same thing.
Other mathematical functions that transform matrix objects are:
End of explanation
"""
C.H
"""
Explanation: Hermitian conjugate: transpose + conjugate
End of explanation
"""
np.real(C) # same as: C.real
np.imag(C) # same as: C.imag
"""
Explanation: We can extract the real and imaginary parts of complex-valued arrays using real and imag:
End of explanation
"""
np.angle(C+1) # heads up MATLAB Users, angle is used instead of arg
abs(C)
"""
Explanation: Or the complex argument and absolute value
End of explanation
"""
np.linalg.inv(C) # equivalent to C.I
C.I * C
"""
Explanation: Matrix computations
Inverse
End of explanation
"""
np.linalg.det(C)
np.linalg.det(C.I)
"""
Explanation: Determinant
End of explanation
"""
# reminder, the tempeature dataset is stored in the data variable:
np.shape(data)
"""
Explanation: Data processing
Often it is useful to store datasets in Numpy arrays. Numpy provides a number of functions to calculate statistics of datasets in arrays.
For example, let's calculate some properties from the Stockholm temperature dataset used above.
End of explanation
"""
# the temperature data is in column 3
np.mean(data[:,3])
"""
Explanation: mean
End of explanation
"""
np.std(data[:,3]), np.var(data[:,3])
"""
Explanation: The daily mean temperature in Stockholm over the last 200 years has been about 6.2 C.
standard deviations and variance
End of explanation
"""
# lowest daily average temperature
data[:,3].min()
# highest daily average temperature
data[:,3].max()
"""
Explanation: min and max
End of explanation
"""
d = np.arange(0, 10)
d
# sum up all elements
np.sum(d)
# product of all elements
np.prod(d+1)
# cummulative sum
np.cumsum(d)
# cummulative product
np.cumprod(d+1)
# same as: diag(A).sum()
np.trace(A)
"""
Explanation: sum, prod, and trace
End of explanation
"""
!head -n 3 ../data/stockholm_td_adj.dat
"""
Explanation: Computations on subsets of arrays
We can compute with subsets of the data in an array using indexing, fancy indexing, and the other methods of extracting data from an array (described above).
For example, let's go back to the temperature dataset:
End of explanation
"""
np.unique(data[:,1]) # the month column takes values from 1 to 12
mask_feb = data[:,1] == 2
# the temperature data is in column 3
np.mean(data[mask_feb,3])
"""
Explanation: The data format is: year, month, day, daily average temperature, low, high, location.
If we are interested in the average temperature only in a particular month, say February, then we can create an index mask and use it to select only the data for that month using:
End of explanation
"""
months = np.arange(1,13)
monthly_mean = [np.mean(data[data[:,1] == month, 3]) for month in months]
fig, ax = plt.subplots()
ax.bar(months, monthly_mean)
ax.set_xlabel("Month")
ax.set_ylabel("Monthly avg. temp.");
"""
Explanation: With these tools we have very powerful data processing capabilities at our disposal. For example, to extract the average monthly average temperatures for each month of the year only takes a few lines of code:
End of explanation
"""
m = np.random.rand(3,3)
m
# global max
m.max()
# max in each column
m.max(axis=0)
# max in each row
m.max(axis=1)
"""
Explanation: Calculations with higher-dimensional data
When functions such as min, max, etc. are applied to multidimensional arrays, it is sometimes useful to apply the calculation to the entire array, and sometimes only on a row or column basis. Using the axis argument we can specify how these functions should behave:
End of explanation
"""
A
n, m = A.shape
B = A.reshape((1,n*m))
B
B[0,0:5] = 5 # modify the array
B
A # and the original variable is also changed. B is only a different view of the same data
"""
Explanation: Many other functions and methods in the array and matrix classes accept the same (optional) axis keyword argument.
Reshaping, resizing and stacking arrays
The shape of a Numpy array can be modified without copying the underlying data, which makes it a fast operation even for large arrays.
End of explanation
"""
B = A.flatten()
B
B[0:5] = 10
B
A # now A has not changed, because B's data is a copy of A's, not referring to the same data
"""
Explanation: We can also use the function flatten to make a higher-dimensional array into a vector. But this function creates a copy of the data.
End of explanation
"""
v = np.array([1,2,3])
v.shape
# make a column matrix of the vector v
v[:, np.newaxis]
# column matrix
v[:, np.newaxis].shape
# row matrix
v[np.newaxis, :].shape
"""
Explanation: Adding a new dimension: newaxis
With newaxis, we can insert new dimensions in an array, for example converting a vector to a column or row matrix:
End of explanation
"""
a = np.array([[1, 2], [3, 4]])
# repeat each element 3 times
np.repeat(a, 3)
# tile the matrix 3 times
np.tile(a, 3)
"""
Explanation: Stacking and repeating arrays
Using the functions repeat, tile, vstack, hstack, and concatenate, we can create larger vectors and matrices from smaller ones:
tile and repeat
End of explanation
"""
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
np.concatenate((a, b.T), axis=1)
"""
Explanation: concatenate
End of explanation
"""
np.vstack((a,b))
np.hstack((a,b.T))
"""
Explanation: hstack and vstack
End of explanation
"""
A = np.array([[1, 2], [3, 4]])
A
# now B is referring to the same array data as A
B = A
# changing B affects A
B[0,0] = 10
B
A
"""
Explanation: Copy and "deep copy"
To achieve high performance, assignments in Python usually do not copy the underlying objects. This is important for example when objects are passed between functions, to avoid an excessive amount of memory copying when it is not necessary (technical term: pass by reference).
End of explanation
"""
B = np.copy(A)
# now, if we modify B, A is not affected
B[0,0] = -5
B
A
"""
Explanation: If we want to avoid this behavior, so that we get a new, completely independent object B copied from A, we need to do a so-called "deep copy" using the function copy:
End of explanation
"""
v = np.array([1,2,3,4])
for element in v:
print(element)
M = np.array([[1,2], [3,4]])
for row in M:
print("row", row)
for element in row:
print(element)
"""
Explanation: Iterating over array elements
Generally, we want to avoid iterating over the elements of arrays whenever we can (at all costs). The reason is that in an interpreted language like Python (or MATLAB/R), iterations are really slow compared to vectorized operations.
However, sometimes iterations are unavoidable. For such cases, the Python for loop is the most convenient way to iterate over an array:
End of explanation
"""
for row_idx, row in enumerate(M):
print("row_idx", row_idx, "row", row)
for col_idx, element in enumerate(row):
print("col_idx", col_idx, "element", element)
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
# each element in M is now squared
M
"""
Explanation: When we need to iterate over each element of an array and modify its elements, it is convenient to use the enumerate function to obtain both the element and its index in the for loop:
End of explanation
"""
def theta(x):
"""
Scalar implementation of the Heaviside step function.
"""
if x >= 0:
return 1
else:
return 0
import traceback  # needed for traceback.format_exc() below

try:
    theta(np.array([-3,-2,-1,0,1,2,3]))
except Exception as e:
    print(traceback.format_exc())
"""
Explanation: Vectorizing functions
As mentioned several times by now, to get good performance we should try to avoid looping over elements in our vectors and matrices, and instead use vectorized algorithms. The first step in converting a scalar algorithm to a vectorized algorithm is to make sure that the functions we write work with vector inputs.
End of explanation
"""
theta_vec = np.vectorize(theta)
%%time
theta_vec(np.array([-3,-2,-1,0,1,2,3]))
"""
Explanation: OK, that didn't work because we didn't write the Theta function so that it can handle a vector input...
To get a vectorized version of Theta we can use the Numpy function vectorize. In many cases it can automatically vectorize a function:
End of explanation
"""
def theta(x):
"""
Vector-aware implementation of the Heaviside step function.
"""
return 1 * (x >= 0)
%%time
theta(np.array([-3,-2,-1,0,1,2,3]))
# still works for scalars as well
theta(-1.2), theta(2.6)
"""
Explanation: We can also implement the function to accept a vector input from the beginning (requires more effort but might give better performance):
End of explanation
"""
M
if (M > 5).any():
print("at least one element in M is larger than 5")
else:
print("no element in M is larger than 5")
if (M > 5).all():
print("all elements in M are larger than 5")
else:
print("all elements in M are not larger than 5")
"""
Explanation: Using arrays in conditions
When using arrays in conditions, for example if statements and other boolean expressions, one needs to use any or all, which requires that any or all elements in the array evaluate to True:
End of explanation
"""
M.dtype
M2 = M.astype(float)
M2
M2.dtype
M3 = M.astype(bool)
M3
"""
Explanation: Type casting
Since Numpy arrays are statically typed, the type of an array does not change once created. But we can explicitly cast an array of some type to another using the astype functions (see also the similar asarray function). This always creates a new array of the new type:
End of explanation
"""
%reload_ext version_information
%version_information numpy
"""
Explanation: Further reading
http://numpy.scipy.org - Official Numpy Documentation
http://scipy.org/Tentative_NumPy_Tutorial - Official Numpy Quickstart Tutorial (highly recommended)
http://www.scipy-lectures.org/intro/numpy/index.html - Scipy Lectures: Lecture 1.3
Versions
End of explanation
"""
| GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/building_production_ml_systems/labs/3_kubeflow_pipelines.ipynb | apache-2.0 |
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
pip freeze | grep kfp || pip install kfp
from os import path
import kfp
import kfp.compiler as compiler
import kfp.components as comp
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.notebook
"""
Explanation: Kubeflow pipelines
Learning Objectives:
1. Learn how to deploy a Kubeflow cluster on GCP
1. Learn how to create a experiment in Kubeflow
1. Learn how to package you code into a Kubeflow pipeline
1. Learn how to run a Kubeflow pipeline in a repeatable and traceable way
Introduction
In this notebook, we will first set up a Kubeflow cluster on GCP.
Then, we will create a Kubeflow experiment and a Kubeflow pipeline from our taxifare machine learning code. Finally, we will run the pipeline on the Kubeflow cluster, providing us with a reproducible and traceable way to execute machine learning code.
End of explanation
"""
HOST = # TODO: fill in the HOST information for the cluster
BUCKET = # TODO: fill in the GCS bucket
"""
Explanation: Setup a Kubeflow cluster on GCP
TODO 1
To deploy a Kubeflow cluster
in your GCP project, use the AI Platform pipelines:
Go to AI Platform Pipelines in the GCP Console.
Create a new instance
Hit "Configure"
Check the box "Allow access to the following Cloud APIs"
Hit "Create New Cluster"
Hit "Deploy"
When the cluster is ready, go back to the AI Platform pipelines page and click on the "SETTINGS" entry for your cluster.
This will bring up a pop up with code snippets on how to access the cluster
programmatically.
Copy the "host" entry and set the "HOST" variable below with that.
End of explanation
"""
client = # TODO: create a Kubeflow client
"""
Explanation: Create an experiment
TODO 2
We will start by creating a Kubeflow client to pilot the Kubeflow cluster:
End of explanation
"""
client.list_experiments()
"""
Explanation: Let's look at the experiments that are running on this cluster. Since you just launched it, you should see only a single "Default" experiment:
End of explanation
"""
exp = # TODO: create an experiment called 'taxifare'
"""
Explanation: Now let's create a 'taxifare' experiment where we can look at all the various runs of our taxifare pipeline:
End of explanation
"""
client.list_experiments()
"""
Explanation: Let's make sure the experiment has been created correctly:
End of explanation
"""
# Builds the taxifare trainer container in case you skipped the optional part of lab 1
!taxifare/scripts/build.sh
# Pushes the taxifare trainer container to gcr/io
!taxifare/scripts/push.sh
# Builds the KF component containers and push them to gcr/io
!cd pipelines && make components
"""
Explanation: Packaging your code into Kubeflow components
We have packaged our taxifare ml pipeline into three components:
* ./components/bq2gcs that creates the training and evaluation data from BigQuery and exports it to GCS
* ./components/trainjob that launches the training container on AI-platform and exports the model
* ./components/deploymodel that deploys the trained model to AI-platform as a REST API
Each of these components has been wrapped into a Docker container, in the same way we did with the taxifare training code in the previous lab.
If you inspect the code in these folders, you'll notice that the main.py or main.sh files contain the code we previously executed in the notebooks (loading the data to GCS from BQ, or launching a training job to AI-platform, etc.). The last line in the Dockerfile tells you that these files are executed when the container is run.
So we just packaged our ml code into light container images for reproducibility.
We have made it simple for you to build the container images and push them to the Google Cloud image registry gcr.io in your project:
End of explanation
"""
%%writefile bq2gcs.yaml
name: bq2gcs
description: |
This component creates the training and
validation datasets as BiqQuery tables and export
them into a Google Cloud Storage bucket at
gs://<BUCKET>/taxifare/data.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-bq2gcs you just created
args: ["--bucket", {inputValue: Input Bucket}]
%%writefile trainjob.yaml
name: trainjob
description: |
This component trains a model to predict that taxi fare in NY.
It takes as argument a GCS bucket and expects its training and
eval data to be at gs://<BUCKET>/taxifare/data/ and will export
the trained model at gs://<BUCKET>/taxifare/model/.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-trainjob you just created
args: [{inputValue: Input Bucket}]
%%writefile deploymodel.yaml
name: deploymodel
description: |
This component deploys a trained taxifare model on GCP as taxifare:dnn.
It takes as argument a GCS bucket and expects the model to deploy
to be found at gs://<BUCKET>/taxifare/model/export/savedmodel/
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-deployment you just created
args: [{inputValue: Input Bucket}]
"""
Explanation: Now that the container images are pushed to the registry in your project, we need to create yaml files describing to Kubeflow how to use these containers. It boils down essentially to
* describing what arguments Kubeflow needs to pass to the containers when it runs them
* telling Kubeflow where to fetch the corresponding Docker images
In the cells below, we have three of these "Kubeflow component description files", one for each of our components.
TODO 3
IMPORTANT: Modify the image URI in the cell
below to reflect that you pushed the images into the gcr.io associated with your project.
End of explanation
"""
# TODO 3
PIPELINE_TAR = 'taxifare.tar.gz'
BQ2GCS_YAML = './bq2gcs.yaml'
TRAINJOB_YAML = './trainjob.yaml'
DEPLOYMODEL_YAML = './deploymodel.yaml'
@dsl.pipeline(
name='Taxifare',
description='Train a ml model to predict the taxi fare in NY')
def pipeline(gcs_bucket_name='<bucket where data and model will be exported>'):
bq2gcs_op = comp.load_component_from_file(BQ2GCS_YAML)
bq2gcs = bq2gcs_op(
input_bucket=gcs_bucket_name,
)
trainjob_op = # TODO: Load the yaml file for training
trainjob = # TODO: Add your code to run the training job
)
deploymodel_op = # TODO: Load the yaml file for deployment
deploymodel = # TODO: Add your code to run model deployment
)
# TODO: Add the code to run 'trainjob' after 'bq2gcs' in the pipeline
# TODO: Add the code to run 'deploymodel' after 'trainjob' in the pipeline
"""
Explanation: Create a Kubeflow pipeline
The code below creates a kubeflow pipeline by decorating a regular function with the
@dsl.pipeline decorator. Now the arguments of this decorated function will be
the input parameters of the Kubeflow pipeline.
Inside the function, we describe the pipeline by
* loading the yaml component files we created above into a Kubeflow op
* specifying the order into which the Kubeflow ops should be run
End of explanation
"""
# TODO: Compile the pipeline function above
ls $PIPELINE_TAR
"""
Explanation: The pipeline function above is then used by the Kubeflow compiler to create a Kubeflow pipeline artifact that can be either uploaded to the Kubeflow cluster from the UI, or programmatically, as we will do below:
End of explanation
"""
# TODO 4
run = client.run_pipeline(
experiment_id= # TODO: Add code for experiment id
job_name= # TODO: Provide a jobname
pipeline_package_path= # TODO: Add code for pipeline zip file
params={
'gcs_bucket_name': BUCKET,
},
)
"""
Explanation: If you untar and unzip this pipeline artifact, you'll see that the compiler has transformed the
Python description of the pipeline into yaml description!
Now let's feed Kubeflow with our pipeline and run it using our client:
End of explanation
"""
| genome-nexus/genome-nexus | notebooks/genome_nexus_python_example.ipynb | mit |
from bravado.client import SwaggerClient
client = SwaggerClient.from_url('https://www.genomenexus.org/v2/api-docs',
config={"validate_requests":False,"validate_responses":False,"validate_swagger_spec":False})
print(client)
dir(client)
for a in dir(client):
client.__setattr__(a[:-len('-controller')], client.__getattr__(a))
variant = client.annotation.fetchVariantAnnotationGET(variant='17:g.41242962_41242963insGA').result()
dir(variant)
tc1 = variant.transcript_consequences[0]
dir(tc1)
print(tc1)
"""
Explanation: Programmatic Access to Genome Nexus
This notebook gives some examples in Python for programmatic access to http://genomenexus.org. You can run these examples after installing Jupyter. The easiest way to use Jupyter is to install the Python 3 version of anaconda: https://www.anaconda.com/download/. Once you have that, you can install Jupyter with:
conda install jupyter
For these examples we also require the Swagger API client reader Bravado. It is unfortunately not yet available in anaconda, but you can get it through pip:
conda install pip
pip install bravado
Let's try connecting to the Genome Nexus API now:
End of explanation
"""
import seaborn as sns
%matplotlib inline
sns.set_style("white")
sns.set_context('talk')
import matplotlib.pyplot as plt
cbioportal = SwaggerClient.from_url('https://www.cbioportal.org/api/api-docs',
config={"validate_requests":False,"validate_responses":False})
print(cbioportal)
for a in dir(cbioportal):
cbioportal.__setattr__(a.replace(' ', '_').lower(), cbioportal.__getattr__(a))
dir(cbioportal)
muts = cbioportal.mutations.getMutationsInMolecularProfileBySampleListIdUsingGET(
molecularProfileId="msk_impact_2017_mutations", # {study_id}_mutations gives default mutations profile for study
sampleListId="msk_impact_2017_all", # {study_id}_all includes all samples
projection="DETAILED" # include gene info
).result()
import pandas as pd
mdf = pd.DataFrame([dict(m.__dict__['_Model__dict'],
**m.__dict__['_Model__dict']['gene'].__dict__['_Model__dict']) for m in muts])
mdf.groupby('uniqueSampleKey').studyId.count().plot(kind='hist', bins=400, xlim=(0,30))
plt.xlabel('Number of mutations in sample')
plt.ylabel('Number of samples')
plt.title('Number of mutations across samples in MSK-IMPACT (2017)')
sns.despine(trim=True)
mdf.variantType.astype(str).value_counts().plot(kind='bar')
plt.title('Types of mutations in MSK-IMPACT (2017)')
sns.despine(trim=False)
"""
Explanation: Connect with cBioPortal API
cBioPortal also uses Swagger for their API.
End of explanation
"""
snvs = mdf[(mdf.variantType == 'SNP') & (mdf.variantAllele != '-') & (mdf.referenceAllele != '-')].copy()
# need query string like 9:g.22125503G>C
snvs['hgvs_for_gn'] = snvs.chromosome.astype(str) + ":g." + snvs.startPosition.astype(str) + snvs.referenceAllele + '>' + snvs.variantAllele
assert(snvs['hgvs_for_gn'].isnull().sum() == 0)
import time
qvariants = list(set(snvs.hgvs_for_gn))
gn_results = []
chunk_size = 500
print("Querying {} variants".format(len(qvariants)))
for n, qvar in enumerate([qvariants[i:i + chunk_size] for i in range(0, len(qvariants), chunk_size)]):
try:
gn_results += client.annotation.fetchVariantAnnotationPOST(variants=qvar,fields=['hotspots']).result()
print("Querying [{}, {}]: Success".format(n*chunk_size, min(len(qvariants), n*chunk_size+chunk_size)))
except Exception as e:
print("Querying [{}, {}]: Failed".format(n*chunk_size, min(len(qvariants), n*chunk_size+chunk_size)))
pass
time.sleep(1) # add a delay, to not overload server
gn_dict = {v.id:v for v in gn_results}
def is_sift_high(variant):
return variant in gn_dict and \
len(list(filter(lambda x: x.sift_prediction == 'deleterious', gn_dict[variant].transcript_consequences))) > 0
def is_polyphen_high(variant):
return variant in gn_dict and \
len(list(filter(lambda x: x.polyphen_prediction == 'probably_damaging', gn_dict[variant].transcript_consequences))) > 0
"""
Explanation: Annotate cBioPortal mutations with Genome Nexus
For convenience's sake we're using only SNVs here. Eventually there will be an endpoint to help convert pos, ref, alt to the hgvs notation.
End of explanation
"""
snvs['is_sift_high'] = snvs.hgvs_for_gn.apply(is_sift_high)
snvs['is_polyphen_high'] = snvs.hgvs_for_gn.apply(is_polyphen_high)
from matplotlib_venn import venn2
venn2(subsets=((snvs.is_sift_high & (~snvs.is_polyphen_high)).sum(),
(snvs.is_polyphen_high & (~snvs.is_sift_high)).sum(),
(snvs.is_polyphen_high & snvs.is_sift_high).sum()), set_labels=["SIFT","PolyPhen-2"])
plt.title("Variants as predicted to have a high impact in MSK-IMPACT (2017)")
"""
Explanation: Check overlap SIFT/PolyPhen-2
End of explanation
"""
| austinjalexander/sandbox | python/py/NN.ipynb | mit |
import numpy as np

# activation function: rectified linear function
def g(a):
    return np.maximum(0, a)  # element-wise max(0, a)
"""
Explanation: $\textbf{w}$: connection weights
$b$: neuron bias
$g(\cdot)$: activation function
activation function examples:
linear: $g(a) = a$
sigmoid: $g(a) = \text{sigm}(a) = \frac{1}{1+\text{exp}(-a)} = \frac{1}{1+e^{-a}}$
hyperbolic tanget: $g(a) = \text{tanh}(a) = \frac{\text{exp}(a)-\text{exp}(-a)}{\text{exp}(a)+\text{exp}(-a)} = \frac{\text{exp}(2a)-1}{\text{exp}(2a)+1} = \frac{e^{a}-e^{-a}}{e^{a}+e^{-a}} = \frac{e^{2a}-1}{e^{2a}+1}$
rectified linear function: $g(a) = \text{reclin}(a) = \text{max}(0,a)$
End of explanation
"""
# neuron pre-activation (input)
def a(x):
return b + w.T.dot(x)
"""
Explanation: neuron pre-activation
$a(\textbf{x}) = b + \sum\limits_{i} w_{i}x_{i} = b + \textbf{w}^{T}\textbf{x}$
End of explanation
"""
# neuron activation (output)
def h(x):
return g(a(x))
"""
Explanation: neuron activation
$h(\textbf{x}) = g(a(\textbf{x}))$
End of explanation
"""
| vadim-ivlev/STUDY | algorithms/.ipynb_checkpoints/tutorial_full-checkpoint.ipynb | mit |
import networkx as nx
G = nx.Graph()
G
"""
Explanation: Tutorial
This guide can help you start working with NetworkX.
Creating a graph
Create an empty graph with no nodes and no edges.
End of explanation
"""
G.add_node(1)
"""
Explanation: By definition, a Graph is a collection of nodes (vertices) along with
identified pairs of nodes (called edges, links, etc). In NetworkX, nodes can
be any hashable object e.g., a text string, an image, an XML object, another
Graph, a customized node object, etc.
Nodes
The graph G can be grown in several ways. NetworkX includes many graph
generator functions and facilities to read and write graphs in many formats.
To get started though we’ll look at simple manipulations. You can add one node
at a time,
End of explanation
"""
G.add_nodes_from([2, 3])
"""
Explanation: add a list of nodes,
End of explanation
"""
H = nx.path_graph(10)
G.add_nodes_from(H)
"""
Explanation: or add any iterable container of nodes. You can also add nodes along with node
attributes if your container yields 2-tuples (node, node_attribute_dict).
Node attributes are discussed further below.
End of explanation
"""
G.add_node(H)
"""
Explanation: Note that G now contains the nodes of H as nodes of G.
In contrast, you could use the graph H as a node in G.
End of explanation
"""
G.add_edge(1, 2)
e = (2, 3)
G.add_edge(*e) # unpack edge tuple*
"""
Explanation: The graph G now contains H as a node. This flexibility is very powerful as
it allows graphs of graphs, graphs of files, graphs of functions and much more.
It is worth thinking about how to structure your application so that the nodes
are useful entities. Of course you can always use a unique identifier in G
and have a separate dictionary keyed by identifier to the node information if
you prefer.
Edges
G can also be grown by adding one edge at a time,
End of explanation
"""
G.add_edges_from([(1, 2), (1, 3)])
"""
Explanation: by adding a list of edges,
End of explanation
"""
G.add_edges_from(H.edges)
"""
Explanation: or by adding any ebunch of edges. An ebunch is any iterable
container of edge-tuples. An edge-tuple can be a 2-tuple of nodes or a 3-tuple
with 2 nodes followed by an edge attribute dictionary, e.g.,
(2, 3, {'weight': 3.1415}). Edge attributes are discussed further below
End of explanation
"""
G.clear()
"""
Explanation: There are no complaints when adding existing nodes or edges. For example,
after removing all nodes and edges,
End of explanation
"""
G.add_edges_from([(1, 2), (1, 3)])
G.add_node(1)
G.add_edge(1, 2)
G.add_node("spam") # adds node "spam"
G.add_nodes_from("spam") # adds 4 nodes: 's', 'p', 'a', 'm'
G.add_edge(3, 'm')
"""
Explanation: we add new nodes/edges and NetworkX quietly ignores any that are
already present.
End of explanation
"""
G.number_of_nodes()
G.number_of_edges()
"""
Explanation: At this stage the graph G consists of 8 nodes and 3 edges, as can be seen by:
End of explanation
"""
list(G.nodes)
list(G.edges)
list(G.adj[1]) # or list(G.neighbors(1))
G.degree[1] # the number of edges incident to 1
"""
Explanation: We can examine the nodes and edges. Four basic graph properties facilitate
reporting: G.nodes, G.edges, G.adj and G.degree. These
are set-like views of the nodes, edges, neighbors (adjacencies), and degrees
of nodes in a graph. They offer a continually updated read-only view into
the graph structure. They are also dict-like in that you can look up node
and edge data attributes via the views and iterate with data attributes
using methods .items(), .data('span').
If you want a specific container type instead of a view, you can specify one.
Here we use lists, though sets, dicts, tuples and other containers may be
better in other contexts.
End of explanation
"""
G.edges([2, 'm'])
G.degree([2, 3])
"""
Explanation: One can specify to report the edges and degree from a subset of all nodes
using an nbunch. An nbunch is any of: None (meaning all nodes), a node,
or an iterable container of nodes that is not itself a node in the graph.
End of explanation
"""
G.remove_node(2)
G.remove_nodes_from("spam")
list(G.nodes)
G.remove_edge(1, 3)
"""
Explanation: One can remove nodes and edges from the graph in a similar fashion to adding.
Use methods
Graph.remove_node(),
Graph.remove_nodes_from(),
Graph.remove_edge()
and
Graph.remove_edges_from(), e.g.
End of explanation
"""
G.add_edge(1, 2)
H = nx.DiGraph(G) # create a DiGraph using the connections from G
list(H.edges())
edgelist = [(0, 1), (1, 2), (2, 3)]
H = nx.Graph(edgelist)
"""
Explanation: When creating a graph structure by instantiating one of the graph
classes you can specify data in several formats.
End of explanation
"""
G[1] # same as G.adj[1]
G[1][2]
G.edges[1, 2]
"""
Explanation: What to use as nodes and edges
You might notice that nodes and edges are not specified as NetworkX
objects. This leaves you free to use meaningful items as nodes and
edges. The most common choices are numbers or strings, but a node can
be any hashable object (except None), and an edge can be associated
with any object x using G.add_edge(n1, n2, object=x).
As an example, n1 and n2 could be protein objects from the RCSB Protein
Data Bank, and x could refer to an XML record of publications detailing
experimental observations of their interaction.
We have found this power quite useful, but its abuse
can lead to unexpected surprises unless one is familiar with Python.
If in doubt, consider using convert_node_labels_to_integers() to obtain
a more traditional graph with integer labels.
Accessing edges and neighbors
In addition to the views Graph.edges(), and Graph.adj(),
access to edges and neighbors is possible using subscript notation.
End of explanation
"""
G.add_edge(1, 3)
G[1][3]['color'] = "blue"
G.edges[1, 2]['color'] = "red"
"""
Explanation: You can get/set the attributes of an edge using subscript notation
if the edge already exists.
End of explanation
"""
FG = nx.Graph()
FG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])
for n, nbrs in FG.adj.items():
for nbr, eattr in nbrs.items():
wt = eattr['weight']
if wt < 0.5: print('(%d, %d, %.3f)' % (n, nbr, wt))
"""
Explanation: Fast examination of all (node, adjacency) pairs is achieved using
G.adjacency(), or G.adj.items().
Note that for undirected graphs, adjacency iteration sees each edge twice.
End of explanation
"""
for (u, v, wt) in FG.edges.data('weight'):
if wt < 0.5: print('(%d, %d, %.3f)' % (u, v, wt))
"""
Explanation: Convenient access to all edges is achieved with the edges property.
End of explanation
"""
G = nx.Graph(day="Friday")
G.graph
"""
Explanation: Adding attributes to graphs, nodes, and edges
Attributes such as weights, labels, colors, or whatever Python object you like,
can be attached to graphs, nodes, or edges.
Each graph, node, and edge can hold key/value attribute pairs in an associated
attribute dictionary (the keys must be hashable). By default these are empty,
but attributes can be added or changed using add_edge, add_node or direct
manipulation of the attribute dictionaries named G.graph, G.nodes, and
G.edges for a graph G.
Graph attributes
Assign graph attributes when creating a new graph
End of explanation
"""
G.graph['day'] = "Monday"
G.graph
"""
Explanation: Or you can modify attributes later
End of explanation
"""
G.add_node(1, time='5pm')
G.add_nodes_from([3], time='2pm')
G.nodes[1]
G.nodes[1]['room'] = 714
G.nodes.data()
"""
Explanation: Node attributes
Add node attributes using add_node(), add_nodes_from(), or G.nodes
End of explanation
"""
G.add_edge(1, 2, weight=4.7 )
G.add_edges_from([(3, 4), (4, 5)], color='red')
G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])
G[1][2]['weight'] = 4.7
G.edges[3, 4]['weight'] = 4.2
"""
Explanation: Note that adding a node to G.nodes does not add it to the graph, use
G.add_node() to add new nodes. Similarly for edges.
Edge Attributes
Add/change edge attributes using add_edge(), add_edges_from(),
or subscript notation.
End of explanation
"""
DG = nx.DiGraph()
DG.add_weighted_edges_from([(1, 2, 0.5), (3, 1, 0.75)])
DG.out_degree(1, weight='weight')
DG.degree(1, weight='weight')
list(DG.successors(1))
list(DG.neighbors(1))
"""
Explanation: The special attribute weight should be numeric as it is used by
algorithms requiring weighted edges.
Directed graphs
The DiGraph class provides additional properties specific to
directed edges, e.g.,
DiGraph.out_edges(), DiGraph.in_degree(),
DiGraph.predecessors(), DiGraph.successors() etc.
To allow algorithms to work with both classes easily, the directed version of
neighbors() is equivalent to successors(), while degree reports
the sum of in_degree and out_degree even though that may feel
inconsistent at times.
End of explanation
"""
H = nx.Graph(G) # convert G to undirected graph
"""
Explanation: Some algorithms work only for directed graphs and others are not well
defined for directed graphs. Indeed the tendency to lump directed
and undirected graphs together is dangerous. If you want to treat
a directed graph as undirected for some measurement you should probably
convert it using Graph.to_undirected() or with
End of explanation
"""
MG = nx.MultiGraph()
MG.add_weighted_edges_from([(1, 2, 0.5), (1, 2, 0.75), (2, 3, 0.5)])
dict(MG.degree(weight='weight'))
GG = nx.Graph()
for n, nbrs in MG.adjacency():
for nbr, edict in nbrs.items():
minvalue = min([d['weight'] for d in edict.values()])
GG.add_edge(n, nbr, weight = minvalue)
nx.shortest_path(GG, 1, 3)
"""
Explanation: Multigraphs
NetworkX provides classes for graphs which allow multiple edges
between any pair of nodes. The MultiGraph and
MultiDiGraph
classes allow you to add the same edge twice, possibly with different
edge data. This can be powerful for some applications, but many
algorithms are not well defined on such graphs.
Where results are well defined,
e.g., MultiGraph.degree() we provide the function. Otherwise you
should convert to a standard graph in a way that makes the measurement
well defined.
End of explanation
"""
petersen = nx.petersen_graph()
tutte = nx.tutte_graph()
maze = nx.sedgewick_maze_graph()
tet = nx.tetrahedral_graph()
"""
Explanation: Graph generators and graph operations
In addition to constructing graphs node-by-node or edge-by-edge, they
can also be generated by
Applying classic graph operations, such as:
subgraph(G, nbunch) - induced subgraph view of G on nodes in nbunch
union(G1,G2) - graph union
disjoint_union(G1,G2) - graph union assuming all nodes are different
cartesian_product(G1,G2) - return Cartesian product graph
compose(G1,G2) - combine graphs identifying nodes common to both
complement(G) - graph complement
create_empty_copy(G) - return an empty copy of the same graph class
convert_to_undirected(G) - return an undirected representation of G
convert_to_directed(G) - return a directed representation of G
Using a call to one of the classic small graphs, e.g.,
End of explanation
"""
K_5 = nx.complete_graph(5)
K_3_5 = nx.complete_bipartite_graph(3, 5)
barbell = nx.barbell_graph(10, 10)
lollipop = nx.lollipop_graph(10, 20)
"""
Explanation: Using a (constructive) generator for a classic graph, e.g.,
End of explanation
"""
er = nx.erdos_renyi_graph(100, 0.15)
ws = nx.watts_strogatz_graph(30, 3, 0.1)
ba = nx.barabasi_albert_graph(100, 5)
red = nx.random_lobster(100, 0.9, 0.9)
"""
Explanation: Using a stochastic graph generator, e.g.,
End of explanation
"""
nx.write_gml(red, "path.to.file")
mygraph = nx.read_gml("path.to.file")
"""
Explanation: Reading a graph stored in a file using common graph formats,
such as edge lists, adjacency lists, GML, GraphML, pickle, LEDA and others.
End of explanation
"""
G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3)])
G.add_node("spam") # adds node "spam"
list(nx.connected_components(G))
sorted(d for n, d in G.degree())
nx.clustering(G)
"""
Explanation: For details on graph formats see Reading and writing graphs
and for graph generator functions see Graph generators
Analyzing graphs
The structure of G can be analyzed using various graph-theoretic
functions such as:
End of explanation
"""
sp = dict(nx.all_pairs_shortest_path(G))
sp[3]
"""
Explanation: Some functions with large output iterate over (node, value) 2-tuples.
These are easily stored in a dict structure if you desire.
End of explanation
"""
import matplotlib.pyplot as plt
"""
Explanation: See Algorithms for details on graph algorithms
supported.
Drawing graphs
NetworkX is not primarily a graph drawing package but basic drawing with
Matplotlib as well as an interface to use the open source Graphviz software
package are included. These are part of the networkx.drawing module and will
be imported if possible.
First import Matplotlib’s plot interface (pylab works too)
End of explanation
"""
G = nx.petersen_graph()
plt.subplot(121)
nx.draw(G, with_labels=True, font_weight='bold')
plt.subplot(122)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold')
"""
Explanation: You may find it useful to interactively test code using ipython -pylab,
which combines the power of ipython and matplotlib and provides a convenient
interactive mode.
To test if the import of networkx.drawing was successful draw G using one of
End of explanation
"""
plt.show()
"""
Explanation: when drawing to an interactive display. Note that you may need to issue a
Matplotlib
End of explanation
"""
options = {
'node_color': 'black',
'node_size': 100,
'width': 3,
}
plt.subplot(221)
nx.draw_random(G, **options)
plt.subplot(222)
nx.draw_circular(G, **options)
plt.subplot(223)
nx.draw_spectral(G, **options)
plt.subplot(224)
nx.draw_shell(G, nlist=[range(5,10), range(5)], **options)
"""
Explanation: command if you are not using matplotlib in interactive mode (see
Matplotlib FAQ
).
End of explanation
"""
G = nx.dodecahedral_graph()
shells = [[2, 3, 4, 5, 6], [8, 1, 0, 19, 18, 17, 16, 15, 14, 7], [9, 10, 11, 12, 13]]
nx.draw_shell(G, nlist=shells, **options)
"""
Explanation: You can find additional options via draw_networkx() and
layouts via layout.
You can use multiple shells with draw_shell().
End of explanation
"""
nx.draw(G)
plt.savefig("path.png")
"""
Explanation: To save drawings to a file, use, for example
End of explanation
"""
from networkx.drawing.nx_pydot import write_dot
pos = nx.nx_agraph.graphviz_layout(G)
nx.draw(G, pos=pos)
write_dot(G, 'file.dot')
"""
Explanation: writes to the file path.png in the local directory. If Graphviz and
PyGraphviz or pydot, are available on your system, you can also use
nx_agraph.graphviz_layout(G) or nx_pydot.graphviz_layout(G) to get the
node positions, or write the graph in dot format for further processing.
End of explanation
"""
| google/lifetime_value | notebooks/kaggle_acquire_valued_shoppers_challenge/regression.ipynb | apache-2.0 |
import os
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
import tensorflow_probability as tfp
import tqdm
from typing import Sequence
# install and import ltv
!pip install -q git+https://github.com/google/lifetime_value
import lifetime_value as ltv
tfd = tfp.distributions
%config InlineBackend.figure_format='retina'
sns.set_style('whitegrid')
pd.options.mode.chained_assignment = None # default='warn'
"""
Explanation: Lifetime Value prediction for the Kaggle Acquire Valued Shoppers Challenge
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/google/lifetime_value/blob/master/notebooks/kaggle_acquire_valued_shoppers_challenge/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/google/lifetime_value/blob/master/notebooks/kaggle_acquire_valued_shoppers_challenge/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
End of explanation
"""
COMPANY = '103600030' # @param { isTemplate: true, type: 'string'}
LOSS = 'ziln' # @param { isTemplate: true, type: 'string'} ['mse', 'ziln']
MODEL = 'dnn' # @param { isTemplate: true, type: 'string'} ['linear', 'dnn']
LEARNING_RATE = 0.0002 # @param { isTemplate: true}
EPOCHS = 400 # @param { isTemplate: true, type: 'integer'}
OUTPUT_CSV_FOLDER = '/tmp/lifetime-value/kaggle_acquire_valued_shoppers_challenge/result' # @param { isTemplate: true, type: 'string'}
CATEGORICAL_FEATURES = ['chain', 'dept', 'category', 'brand', 'productmeasure']
NUMERIC_FEATURES = ['log_calibration_value']
ALL_FEATURES = CATEGORICAL_FEATURES + NUMERIC_FEATURES
"""
Explanation: Global variables
End of explanation
"""
%%shell
if [ -e /tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions.csv ]
then
echo "File already exists, no need to download."
else
rm -rf /tmp/lifetime-value/acquire-valued-shoppers-challenge
mkdir -p /tmp/lifetime-value/acquire-valued-shoppers-challenge
cd /tmp/lifetime-value/acquire-valued-shoppers-challenge
kaggle competitions download -c acquire-valued-shoppers-challenge
echo "Unzip file. This may take 10 min."
gunzip transactions.csv.gz
fi
"""
Explanation: Data
Download data
Set up the kaggle API correctly following https://www.kaggle.com/docs/api
%%shell
mkdir ~/.kaggle
echo \{\"username\":\"{your kaggle username}\",\"key\":\"{your kaggle api key}\"\} > ~/.kaggle/kaggle.json
pip install kaggle
End of explanation
"""
def load_transaction_data(company):
all_data_filename = '/tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions.csv'
one_company_data_filename = (
'/tmp/lifetime-value/acquire-valued-shoppers-challenge/transactions_company_{}.csv'
.format(COMPANY))
if os.path.isfile(one_company_data_filename):
df = pd.read_csv(one_company_data_filename)
else:
data_list = []
chunksize = 10**6
# 350 iterations
for chunk in tqdm.tqdm(pd.read_csv(all_data_filename, chunksize=chunksize)):
data_list.append(chunk.query("company=='{}'".format(company)))
df = pd.concat(data_list, axis=0)
df.to_csv(one_company_data_filename, index=None)
return df
"""
Explanation: Load transaction csv
End of explanation
"""
def preprocess(df):
df = df.query('purchaseamount>0')
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df['start_date'] = df.groupby('id')['date'].transform('min')
# Compute calibration values
calibration_value = (
df.query('date==start_date').groupby('id')
['purchaseamount'].sum().reset_index())
calibration_value.columns = ['id', 'calibration_value']
# Compute holdout values
one_year_holdout_window_mask = (
(df['date'] > df['start_date']) &
(df['date'] <= df['start_date'] + np.timedelta64(365, 'D')))
holdout_value = (
df[one_year_holdout_window_mask].groupby('id')
['purchaseamount'].sum().reset_index())
holdout_value.columns = ['id', 'holdout_value']
# Compute calibration attributes
calibration_attributes = (
df.query('date==start_date').sort_values(
'purchaseamount', ascending=False).groupby('id')[[
'chain', 'dept', 'category', 'brand', 'productmeasure'
]].first().reset_index())
# Merge dataframes
customer_level_data = (
calibration_value.merge(calibration_attributes, how='left',
on='id').merge(
holdout_value, how='left', on='id'))
customer_level_data['holdout_value'] = (
customer_level_data['holdout_value'].fillna(0.))
customer_level_data[CATEGORICAL_FEATURES] = (
customer_level_data[CATEGORICAL_FEATURES].fillna('UNKNOWN'))
# Specify data types
customer_level_data['log_calibration_value'] = (
np.log(customer_level_data['calibration_value']).astype('float32'))
customer_level_data['chain'] = (
customer_level_data['chain'].astype('category'))
customer_level_data['dept'] = (customer_level_data['dept'].astype('category'))
customer_level_data['brand'] = (
customer_level_data['brand'].astype('category'))
customer_level_data['category'] = (
customer_level_data['category'].astype('category'))
customer_level_data['label'] = (
customer_level_data['holdout_value'].astype('float32'))
return customer_level_data
"""
Explanation: Preprocess data
End of explanation
"""
def load_customer_level_csv(company):
customer_level_data_file = (
'/tmp/lifetime-value/acquire-valued-shoppers-challenge/customer_level_data_company_{}.csv'
.format(company))
if os.path.isfile(customer_level_data_file):
customer_level_data = pd.read_csv(customer_level_data_file)
else:
customer_level_data = preprocess(load_transaction_data(company))
for cat_col in CATEGORICAL_FEATURES:
customer_level_data[cat_col] = (
customer_level_data[cat_col].astype('category'))
for num_col in [
'log_calibration_value', 'calibration_value', 'holdout_value'
]:
customer_level_data[num_col] = (
customer_level_data[num_col].astype('float32'))
return customer_level_data
# Processes data. 350 iterations in total. May take 10 min.
customer_level_data = load_customer_level_csv(COMPANY)
"""
Explanation: Load customer-level csv
End of explanation
"""
customer_level_data.label.apply(np.log1p).hist(bins=50)
"""
Explanation: We observe a mixture of zeros and a lognormal distribution in the holdout value.
End of explanation
"""
def linear_split(df):
# get_dummies preserves numeric features.
x = pd.get_dummies(df[ALL_FEATURES], drop_first=True).astype('float32').values
y = df['label'].values
y0 = df['calibration_value'].values
x_train, x_eval, y_train, y_eval, y0_train, y0_eval = (
model_selection.train_test_split(
x, y, y0, test_size=0.2, random_state=123))
return x_train, x_eval, y_train, y_eval, y0_eval
def dnn_split(df):
for key in CATEGORICAL_FEATURES:
encoder = preprocessing.LabelEncoder()
df[key] = encoder.fit_transform(df[key])
y0 = df['calibration_value'].values
df_train, df_eval, y0_train, y0_eval = model_selection.train_test_split(
df, y0, test_size=0.2, random_state=123)
def feature_dict(df):
features = {k: v.values for k, v in dict(df[CATEGORICAL_FEATURES]).items()}
features['numeric'] = df[NUMERIC_FEATURES].values
return features
x_train, y_train = feature_dict(df_train), df_train['label'].values
x_eval, y_eval = feature_dict(df_eval), df_eval['label'].values
return x_train, x_eval, y_train, y_eval, y0_eval
"""
Explanation: Make train/eval
End of explanation
"""
def linear_model(output_units):
return tf.keras.experimental.LinearModel(output_units)
def embedding_dim(x):
return int(x**.25) + 1
def embedding_layer(vocab_size):
return tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim=vocab_size,
output_dim=embedding_dim(vocab_size),
input_length=1),
tf.keras.layers.Flatten(),
])
def dnn_model(output_units, df):
numeric_input = tf.keras.layers.Input(
shape=(len(NUMERIC_FEATURES),), name='numeric')
embedding_inputs = [
tf.keras.layers.Input(shape=(1,), name=key, dtype=np.int64)
for key in CATEGORICAL_FEATURES
]
embedding_outputs = [
embedding_layer(vocab_size=df[key].nunique())(input)
for key, input in zip(CATEGORICAL_FEATURES, embedding_inputs)
]
deep_input = tf.keras.layers.concatenate([numeric_input] + embedding_outputs)
deep_model = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(output_units),
])
return tf.keras.Model(
inputs=[numeric_input] + embedding_inputs, outputs=deep_model(deep_input))
"""
Explanation: Model
End of explanation
"""
if LOSS == 'mse':
loss = keras.losses.MeanSquaredError()
output_units = 1
if LOSS == 'ziln':
loss = ltv.zero_inflated_lognormal_loss
output_units = 3
if MODEL == 'linear':
x_train, x_eval, y_train, y_eval, y0_eval = linear_split(customer_level_data)
model = linear_model(output_units)
if MODEL == 'dnn':
x_train, x_eval, y_train, y_eval, y0_eval = dnn_split(customer_level_data)
model = dnn_model(output_units, customer_level_data)
model.compile(loss=loss, optimizer=keras.optimizers.Adam(lr=LEARNING_RATE))
callbacks = [
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', min_lr=1e-6),
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10),
]
history = model.fit(
x=x_train,
y=y_train,
batch_size=1024,
epochs=EPOCHS,
verbose=2,
callbacks=callbacks,
validation_data=(x_eval, y_eval)).history
pd.DataFrame(history)[['loss', 'val_loss']][2:].plot()
"""
Explanation: Train
End of explanation
"""
if LOSS == 'mse':
y_pred = model.predict(x=x_eval, batch_size=1024).flatten()
if LOSS == 'ziln':
logits = model.predict(x=x_eval, batch_size=1024)
y_pred = ltv.zero_inflated_lognormal_pred(logits).numpy().flatten()
df_pred = pd.DataFrame({
'y_true': y_eval,
'y_pred': y_pred,
})
df_pred.head(10)
"""
Explanation: Eval
End of explanation
"""
gain = pd.DataFrame({
'lorenz': ltv.cumulative_true(y_eval, y_eval),
'baseline': ltv.cumulative_true(y_eval, y0_eval),
'model': ltv.cumulative_true(y_eval, y_pred),
})
num_customers = np.float32(gain.shape[0])
gain['cumulative_customer'] = (np.arange(num_customers) + 1.) / num_customers
ax = gain[[
'cumulative_customer',
'lorenz',
'baseline',
'model',
]].plot(
x='cumulative_customer', figsize=(8, 5), legend=True)
ax.legend(['Groundtruth', 'Baseline', 'Model'], loc='upper left')
ax.set_xlabel('Cumulative Fraction of Customers')
ax.set_xticks(np.arange(0, 1.1, 0.1))
ax.set_xlim((0, 1.))
ax.set_ylabel('Cumulative Fraction of Total Lifetime Value')
ax.set_yticks(np.arange(0, 1.1, 0.1))
ax.set_ylim((0, 1.05))
ax.set_title('Gain Chart')
gini = ltv.gini_from_gain(gain[['lorenz', 'baseline', 'model']])
gini
"""
Explanation: Gini Coefficient
End of explanation
"""
df_decile = ltv.decile_stats(y_eval, y_pred)
df_decile
ax = df_decile[['label_mean', 'pred_mean']].plot.bar(rot=0)
ax.set_title('Decile Chart')
ax.set_xlabel('Prediction bucket')
ax.set_ylabel('Average bucket value')
ax.legend(['Label', 'Prediction'], loc='upper left')
"""
Explanation: Calibration
End of explanation
"""
def spearmanr(x1: Sequence[float], x2: Sequence[float]) -> float:
"""Calculates spearmanr rank correlation coefficient.
See https://docs.scipy.org/doc/scipy/reference/stats.html.
Args:
x1: 1D array_like.
x2: 1D array_like.
Returns:
correlation: float.
"""
return stats.spearmanr(x1, x2, nan_policy='raise')[0]
spearman_corr = spearmanr(y_eval, y_pred)
spearman_corr
"""
Explanation: Rank Correlation
End of explanation
"""
df_metrics = pd.DataFrame(
{
'company': COMPANY,
'model': MODEL,
'loss': LOSS,
'label_mean': y_eval.mean(),
'pred_mean': y_pred.mean(),
'label_positive': np.mean(y_eval > 0),
'decile_mape': df_decile['decile_mape'].mean(),
'baseline_gini': gini['normalized'][1],
'gini': gini['normalized'][2],
'spearman_corr': spearman_corr,
},
index=[0])
df_metrics[[
'company',
'model',
'loss',
'label_mean',
'pred_mean',
'label_positive',
'decile_mape',
'baseline_gini',
'gini',
'spearman_corr',
]]
"""
Explanation: All metrics together
End of explanation
"""
output_path = os.path.join(OUTPUT_CSV_FOLDER, COMPANY)
if not os.path.isdir(output_path):
os.makedirs(output_path)
output_file = os.path.join(output_path,
'{}_regression_{}.csv'.format(MODEL, LOSS))
df_metrics.to_csv(output_file, index=False)
"""
Explanation: Save
End of explanation
"""