repo_name stringlengths 6-77 | path stringlengths 8-215 | license stringclasses 15 values | content stringlengths 335-154k |
---|---|---|---|
mne-tools/mne-tools.github.io | 0.24/_downloads/772492bca9aff751a357f5e3e0163e67/50_cluster_between_time_freq.ipynb | bsd-3-clause |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
"""
Explanation: Non-parametric between-conditions cluster statistic on single-trial power
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster-level
statistics.
The procedure consists of:
extracting epochs for two conditions
computing single-trial power estimates
baseline-correcting the power estimates (power ratios)
computing statistics to test whether the power estimates differ significantly
between conditions.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332' # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
"""
Explanation: Set parameters
End of explanation
"""
decim = 2
freqs = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
n_cycles=n_cycles, decim=decim,
return_itc=False, average=False)
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :] # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :] # only 1 channel as 3D matrix
"""
Explanation: Factor to downsample the temporal dimension of the TFR computed by
tfr_morlet. Decimation occurs after frequency decomposition and can
be used to reduce memory usage (and possibly computational time of downstream
operations such as nonparametric statistics) if you don't need high
spectrotemporal resolution.
End of explanation
"""
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([epochs_power_1, epochs_power_2], out_type='mask',
n_permutations=100, threshold=threshold, tail=0)
"""
Explanation: Compute statistic
End of explanation
"""
times = 1e3 * epochs_condition_1.times # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
"""
Explanation: View time-frequency plots
End of explanation
"""
|
PerryGrossman/ds_jr | HourofCode2015.ipynb | mit |
# you can also access this directly:
from PIL import Image
im = Image.open("DataScienceProcess.jpg")
im
#path = 'DataScienceProcess.jpg'
#image=Image.open(path)
"""
Explanation: Hour of Code 2015
For Mr. Clifford's Class (5C)
Perry Grossman
December 2015
Introduction
From the Hour of Code to the Power of Code
How to use programming skills for data analysis, or "data science," the new, hot term
<img src="http://www.niemanlab.org/images/drew-conway-data-science-venn-diagram.jpg">
<img src="http://qph.is.quoracdn.net/main-qimg-3504cc03d0a1581096eba9ef97cfd7eb?convert_to_webp=true">
End of explanation
"""
# Comments
# ls lists the files in this folder. See below.
This line will make an error because it is plain English text in a code cell, not Python.
# Leveraging
#http://localhost:8888/notebooks/Dropbox/Python/Harvard%20SEAS%20Tutorial/python-mastery-isr19-master/1-PythonReview.ipynb
ls # NOT PYTHON! command line
pwd # ALSO NOT PYTHON! Shows what folder you are in.
# math
1+2
4000*3
import math
math.sqrt(2)
2 ** (0.5)
637*532.6
from __future__ import division
1/2
(8+5)*4
# Create a variable
name = 'Perry Grossman'
# Print the variable
name
name[6]
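# Indexing starts at 0, so name[6] is the seventh character ('G' in 'Perry Grossman').
# Slices pull out a range of characters:
name[0:5]    # 'Perry'
name[-8:]    # 'Grossman'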
"""
Explanation: Some Basic Things
Leveraging a tutorial by David Beazley, Ian Stokes-Rees and Continuum Analytics
http://localhost:8888/notebooks/Dropbox/Python/Harvard%20SEAS%20Tutorial/python-mastery-isr19-master/1-PythonReview.ipynb
and other resources
End of explanation
"""
from functools import partial
# https://docs.python.org/2/library/functools.html
from random import choice, randint
choice('yes no maybe'.split()) # split is a method
for i in range(10):
print("Call me " + choice('yes no maybe'.split()))
randint(1, 6)
# If you need dice, try this:
roll = partial(randint, 1, 20)
roll()
# how would you make 20 sided dice?
# Create a list of numbers
vals = [3, -8, 2, 7, 6, 2, 5, 12, 4, 9]
#Find the even numbers
evens = []
for v in vals:
if v%2 == 0:
evens.append(v)
#How is this working?
evens
squares = []
for v in vals:
squares.append(v*v)
squares
bigsquares = []
for v in vals:
s = v*v
if s > 10:
bigsquares.append(s)
bigsquares
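# The same filtering and mapping can also be written with list comprehensions --
# a compact, equivalent way to build the three lists above:
evens2 = [v for v in vals if v % 2 == 0]
squares2 = [v * v for v in vals]
bigsquares2 = [v * v for v in vals if v * v > 10]
evens2 == evens, squares2 == squares, bigsquares2 == bigsquares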
"""
Explanation: Floor numbering is the numbering scheme used for a building's floors. There are two major schemes in use across the world. In one system, used in the majority of Europe, the ground floor is the floor on the ground and often has no number or is assigned the number zero. Therefore, the next floor up is assigned the number 1 and is the first floor.
The other system, used primarily in the United States and Canada, counts the bottom floor as number 1 or first floor.
https://en.wikipedia.org/wiki/Storey
End of explanation
"""
|
tennem01/pymks_overview | notebooks/checker_board.ipynb | mit |
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Checkerboard Microstructure
Introduction - What are 2-Point Spatial Correlations (also called 2-Point Statistics)?
The purpose of this example is to introduce 2-point spatial correlations and how they are computed using PyMKS.
The example starts with some introductory information about spatial correlations. PyMKS is used to compute both the periodic and non-periodic 2-point spatial correlations (also referred to as 2-point statistics or autocorrelations and crosscorrelations) for a checkerboard microstructure. This is a relatively simple example that allows an easy discussion of how the spatial correlations capture the main features seen in the original microstructure.
n-Point Spatial Correlations
1-Point Spatial Correlations (or 1-point statistics)
n-point spatial correlations provide a way to rigorously quantify material structure using statistics. As an introduction to n-point spatial correlations, let's first discuss 1-point statistics. 1-point statistics are the probability that a specified local state will be found in any randomly selected spatial bin in a microstructure [1][2][3]. In the checkerboard example discussed here, there are two possible local states: one is colored white and the other is colored black. 1-point statistics compute the volume fractions of the local states in the microstructure. 1-point statistics are computed as
$$ f[l] = \frac{1}{S} \sum_s m[s,l] $$
In this equation, $f[l]$ is the probability of finding the local state $l$ in any randomly selected spatial bin in the microstructure, $m[s, l]$ is the microstructure function (the digital representation of the microstructure), $S$ is the total number of spatial bins in the microstructure and $s$ refers to a specific spatial bin.
While 1-point statistics provide information on the relative amounts of the different local states, it does not provide any information about how those local states are spatially arranged in the microstructure. Therefore, 1-point statistics are a limited set of metrics to describe the structure of materials.
2-Point Spatial Correlations
2-point spatial correlations (also known as 2-point statistics) contain information about the fractions of local states as well as the first order information on how the different local states are distributed in the microstructure.
2-point statistics can be thought of as the probability of having a vector placed randomly in the microstructure and having one end of the vector be on one specified local state and the other end on another specified local state. This vector could have any length or orientation that the discrete microstructure allows. The equation for 2-point statistics can be found below.
$$ f[r \vert l, l'] = \frac{1}{S} \sum_s m[s, l] m[s + r, l'] $$
In this equation $ f[r \vert l, l']$ is the conditional probability of finding the local states $l$ and $l'$ at a distance and orientation away from each other defined by the vector $r$. All other variables are the same as those in the 1-point statistics equation. In the case that we have an eigen microstructure function (it only contains values of 0 or 1) and we are using an indicator basis, the $r=0$ vector will recover the 1-point statistics.
When the 2 local states are the same ($l = l'$), it is referred to as an autocorrelation. If the 2 local states are not the same, it is referred to as a crosscorrelation.
Higher Order Spatial Statistics
Higher order spatial statistics are similar to 2-point statistics, in that they can be thought of in terms of conditional probabilities of finding specified local states separated by a prescribed set of vectors. 3-point statistics are the probability of finding three specified local states at the ends of a triangle (defined by 2 vectors) placed randomly in the material structure. 4-point statistics describes the probability of finding 4 local states at 4 locations (defined using 3 vectors) and so on.
While higher order statistics are a better metric to quantify the material structure, the 2-point statistics can be computed much faster than higher order spatial statistics, and still provide information about how the local states are distributed. For this reason, only 2-point statistics are implemented in PyMKS. Let us look at an example of computing the 2-point statistics for a checkerboard microstructure.
End of explanation
"""
from pymks.datasets import make_checkerboard_microstructure
square_size = 10  # edge length of each square in pixels (value assumed; not given above)
n_squares = 8     # 8 x 8 checkerboard, as described in the text
X = make_checkerboard_microstructure(square_size=square_size, n_squares=n_squares)
"""
Explanation: 2-Point Statistics for Checkerboard Microstructure
Let's first start by making a microstructure that looks like an 8 x 8 checkerboard. Although this type of microstructure may not resemble a physical system, it provides solutions that give some intuitive understanding of 2-point statistics.
We can create a checkerboard microstructure using the make_checkerboard_microstructure function from pymks.datasets.
End of explanation
"""
from pymks.tools import draw_microstructures
draw_microstructures(X)  # display the checkerboard microstructure
print X.shape
"""
Explanation: Now let's take a look at how the microstructure looks.
End of explanation
"""
from pymks.stats import autocorrelate
from pymks import PrimitiveBasis
"""
Explanation: Compute Periodic 2-Point Statistics
Now that we have created a microstructure to work with, we can start computing the 2-point statistics. Let's start by looking at the periodic autocorrelations of the microstructure and then compute the periodic crosscorrelation. This can be done using the autocorrelate and crosscorrelate functions from pymks.stats, and using the keyword argument periodic_axes to specify the axes that are periodic.
In order to compute 2-point statistics, we need to select a basis to generate the microstructure function X_ from the microstructure X. Because we only have values of 0 or 1 in our microstructure, we will use the PrimitiveBasis with n_states equal to 2.
End of explanation
"""
from pymks.tools import draw_autocorrelations
correlations = [('black', 'black'), ('white', 'white')]
"""
Explanation: We have now computed the autocorrelations.
Let's take a look at them using draw_autocorrelations from pymks.tools.
End of explanation
"""
center = (X_auto.shape[1] + 1) / 2
print 'Volume fraction of black phase', X_auto[0, center, center, 0]
print 'Volume fraction of white phase', X_auto[0, center, center, 1]
"""
Explanation: Notice that for this checkerboard microstructure, the autocorrelations for these 2 local states are exactly the same. We have just computed the periodic autocorrelations for a perfectly periodic microstructure with equal volume fractions. In general this is not the case, and the autocorrelations will differ, as we will see later in this example.
As mentioned in the introduction, because we are using an indicator basis and we have eigen-microstructure functions (values are either 0 or 1), the (0, 0) vector equals the volume fraction.
Let's double check that both the phases have a volume fraction of 0.5.
End of explanation
"""
from pymks.stats import crosscorrelate
"""
Explanation: We can compute the cross-correlation of the microstructure function using the crosscorrelate function from pymks.stats
End of explanation
"""
from pymks.tools import draw_crosscorrelations
correlations = [('black', 'white')]
"""
Explanation: Let's take a look at the cross correlation using draw_crosscorrelations from pymks.tools.
End of explanation
"""
print 'Center value', X_cross[0, center, center, 0]
"""
Explanation: Notice that the crosscorrelation is the exact opposite of the 2 autocorrelations. The (0, 0) vector has a value of 0. This statistic reflects the probability of the 2 phases having the same location. In our microstructure, this probability is zero as we have not allowed the two phases (colored black and white) to co-exist in the same spatial voxel.
Let's check that it is zero.
End of explanation
"""
from pymks.stats import correlate
"""
Explanation: Compute Non-Periodic 2-Point Statistics
We will now compute the non-periodic 2-point statistics for our microstructure. This time rather than using the autocorrelate and crosscorrelate functions, we will use the correlate function from pymks.stats. The correlate function computes all of the autocorrelations and crosscorrelations at the same time. We will compute the non-periodic statistics by omitting the keyword argument periodic_axes.
End of explanation
"""
from pymks.tools import draw_correlations
correlations = [('black', 'black'), ('white', 'white'), ('black', 'white')]
"""
Explanation: All or some of the correlations can be viewed using the draw_correlations function from pymks.tools. In this example we will look at all of them.
End of explanation
"""
print 'Volume fraction of black phase', X_corr[0, center, center, 0]
print 'Volume fraction of white phase', X_corr[0, center, center, 1]
"""
Explanation: Notice that the maximum values for the autocorrelations are higher than 0.5. We can still show that the centers, i.e. the (0, 0) vectors, are equal to the volume fractions.
End of explanation
"""
|
nick-youngblut/SIPSim | ipynb/bac_genome/priming_exp/validation_sample/X12C.700.14.05_fracRichness-moreDif.ipynb | mit |
import os
workDir = '/home/nick/notebook/SIPSim/dev/priming_exp/validation_sample/X12C.700.14_fracRichness-moreDif/'
genomeDir = '/home/nick/notebook/SIPSim/dev/priming_exp/genomes/'
allAmpFrags = '/home/nick/notebook/SIPSim/dev/bac_genome1210/validation/ampFrags.pkl'
otuTableFile = '/var/seq_data/priming_exp/data/otu_table.txt'
metaDataFile = '/var/seq_data/priming_exp/data/allsample_metadata_nomock.txt'
primerFile = '/home/nick/notebook/SIPSim/dev/515F-806R.fna'
cdhit_dir = '/home/nick/notebook/SIPSim/dev/priming_exp/CD-HIT/'
R_dir = '/home/nick/notebook/SIPSim/lib/R/'
figureDir = '/home/nick/notebook/SIPSim/figures/'
# total dataset files
#allAmpFrags = '/home/nick/notebook/SIPSim/dev/bac_genome1210/validation/ampFrags.pkl'
genomeAllDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/genomes/'
genomeAllIndex = '/home/nick/notebook/SIPSim/dev/bac_genome1210/genomes/genome_index.txt'
# simulation params
comm_richness = 6606
seq_per_fraction = ['lognormal', 10.096, 1.116]
# for making genome_map file for genome fragment simulation
taxonMapFile = os.path.join(cdhit_dir, 'target_taxa.txt')
genomeFilterFile = os.path.join(cdhit_dir, 'genomeFile_seqID_filt.txt')
abundFile = os.path.join('/home/nick/notebook/SIPSim/dev/priming_exp/exp_info', 'X12C.700.14_frac_OTU.txt')
# misc
nprocs = 20
"""
Explanation: Running SIPSim pipeline to simulate priming_exp gradient dataset
Basing simulation params off of priming_exp dataset
Basing starting community diversity on mean percent abundances in all fraction samples for the gradient
Other parameters are 'default'
Setting variables
End of explanation
"""
import sys
import glob
import cPickle as pickle
import copy
import numpy as np
import pandas as pd
from IPython.display import Image
%load_ext rpy2.ipython
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
if not os.path.isdir(workDir):
os.makedirs(workDir)
"""
Explanation: Init
End of explanation
"""
%%R -i abundFile
# reading priming experiment OTU table
tbl.abund = read.delim(abundFile, sep='\t')
tbl.abund %>% head
%%R
tbl.comm = tbl.abund %>%
rename('taxon_name' = OTUId,
'rel_abund_perc' = mean_perc_abund) %>%
select(taxon_name, rel_abund_perc) %>%
mutate(library = '1',
rank = row_number(-rel_abund_perc)) %>%
arrange(rank)
tbl.comm %>% head
%%R
# rescaling rel_abund_perc so sum(rel_abund_perc) = 100
tbl.comm = tbl.comm %>%
group_by(library) %>%
mutate(total = sum(rel_abund_perc)) %>%
ungroup() %>%
mutate(rel_abund_perc = rel_abund_perc * 100 / total) %>%
select(library, taxon_name, rel_abund_perc, rank)
tbl.comm %>% head
%%R -i comm_richness
# number of OTUs
n.OTUs = tbl.comm$taxon_name %>% unique %>% length
cat('Number of OTUs:', n.OTUs, '\n')
# assertion
cat('Community richness = number of OTUs? ', comm_richness == n.OTUs, '\n')
%%R -i workDir
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
write.table(tbl.comm, commFile, sep='\t', quote=F, row.names=F)
"""
Explanation: Creating a community file from the fraction relative abundances
End of explanation
"""
%%R -i workDir
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
comm = read.delim(commFile, sep='\t')
comm %>% head
%%R -w 900 -h 350
ggplot(comm, aes(rank, rel_abund_perc)) +
geom_point() +
labs(x='Rank', y='% relative abundance', title='Priming experiment community abundance distribution') +
theme_bw() +
theme(
text = element_text(size=16)
)
"""
Explanation: Plotting community distribution
End of explanation
"""
%%R -i taxonMapFile -i genomeFilterFile
taxonMap = read.delim(taxonMapFile, sep='\t') %>%
select(target_genome, OTU) %>%
distinct()
taxonMap %>% nrow %>% print
taxonMap %>% head(n=3) %>% print
breaker = '----------------\n'
cat(breaker)
genomeFilter = read.delim(genomeFilterFile, sep='\t', header=F)
genomeFilter %>% nrow %>% print
genomeFilter %>% head(n=3) %>% print
cat(breaker)
comm = read.delim(commFile, sep='\t')
comm %>% nrow %>% print
comm %>% head(n=3) %>% print
%%R
taxonMap$OTU %>% table %>% sort(decreasing=T) %>% head
%%R
tbl.j = inner_join(taxonMap, genomeFilter, c('target_genome' = 'V1')) %>%
rename('fasta_file' = V2) %>%
select(OTU, fasta_file, target_genome)
tbl.j %>% head(n=3)
%%R
tbl.j$OTU %>% table %>% sort(decreasing=T) %>% head
%%R
tbl.j2 = inner_join(tbl.j, comm, c('OTU' = 'taxon_name'))
n.target.genomes = tbl.j2$OTU %>% unique %>% length
cat('Number of target OTUs: ', n.target.genomes, '\n')
cat('--------', '\n')
tbl.j2 %>% head(n=3)
%%R -i workDir
outFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/')
write.table(tbl.j2, outFile, sep='\t', quote=F, row.names=F, col.names=F)
"""
Explanation: Simulating fragments
Making a genome index file to map genome fasta files to OTUs
Will be used for community simulation
Just OTUs with association to genomes
End of explanation
"""
%%R -w 900 -h 350
ggplot(tbl.j2, aes(rank, rel_abund_perc)) +
geom_point(size=3, shape='O', color='red') +
labs(x='Rank', y='% relative abundance', title='Priming experiment community abundance distribution') +
theme_bw() +
theme(
text = element_text(size=16)
)
"""
Explanation: Plotting community abundance distribution of target genomes
End of explanation
"""
!cd $workDir; \
SIPSim fragments \
target_genome_index.txt \
--fp $genomeDir \
--fr $primerFile \
--fld skewed-normal,5000,2000,-5 \
--flr None,None \
--nf 10000 \
--np $nprocs \
--tbl \
2> ampFrags.log \
> ampFrags.txt
"""
Explanation: Simulating fragments of genomes that match priming_exp bulk OTUs
End of explanation
"""
%%R -i workDir
inFile = paste(c(workDir, 'ampFrags.txt'), collapse='/')
tbl = read.delim(inFile, sep='\t')
tbl %>% head(n=3)
%%R -w 950 -h 650
some.taxa = tbl$taxon_name %>% unique %>% head(n=20)
tbl.f = tbl %>%
filter(taxon_name %in% some.taxa)
ggplot(tbl.f, aes(fragGC, fragLength)) +
stat_density2d() +
labs(x='Fragment G+C', y='Fragment length (bp)') +
facet_wrap(~ taxon_name, ncol=5) +
theme_bw() +
theme(
text=element_text(size=16),
axis.title.y=element_text(vjust=1)
)
# re-running simulation with pickled file
!cd $workDir; \
SIPSim fragments \
target_genome_index.txt \
--fp $genomeDir \
--fr $primerFile \
--fld skewed-normal,5000,2000,-5 \
--flr None,None \
--nf 10000 \
--np $nprocs \
2> ampFrags.log \
> ampFrags.pkl
"""
Explanation: Plotting fragment length distribution
End of explanation
"""
!cd $workDir; \
SIPSim fragments \
$genomeAllIndex \
--fp $genomeAllDir \
--fr $primerFile \
--fld skewed-normal,5000,2000,-5 \
--flr None,None \
--nf 10000 \
--np $nprocs \
2> ampFragsAll.log \
> ampFragsAll.pkl
ampFragsAllFile = os.path.join(workDir, 'ampFragsAll.pkl')
"""
Explanation: Simulating fragments of total dataset with a greater diffusion
End of explanation
"""
%%R -i workDir
# loading files
## target genome index (just OTUs with associated genome)
inFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/')
tbl.target = read.delim(inFile, sep='\t', header=F)
colnames(tbl.target) = c('OTUId', 'fasta_file', 'genome_name')
## comm file of total community OTUs
commFile = paste(c(workDir, 'comm.txt'), collapse='/')
tbl.comm = read.delim(commFile, sep='\t')
%%R
# just OTUs w/out an associated genome
tbl.j = anti_join(tbl.comm, tbl.target, c('taxon_name' = 'OTUId'))
n.nontarget.genomes = tbl.j$taxon_name %>% length
cat('Number of non-target genomes: ', n.nontarget.genomes, '\n')
cat('---------\n')
tbl.j %>% head(n=5)
%%R -i comm_richness
# checking assumptions
cat('Target + nonTarget richness = total community richness?: ',
n.target.genomes + n.nontarget.genomes == comm_richness, '\n')
%%R -i workDir
# writing out non-target OTU file
outFile = paste(c(workDir, 'comm_nonTarget.txt'), collapse='/')
write.table(tbl.j, outFile, sep='\t', quote=F, row.names=F)
"""
Explanation: Appending fragments from randomly selected genomes of total dataset (n=1210)
This is to obtain the richness of the bulk soil community
Random OTUs will be named after non-target OTUs in comm file
Making list of non-target OTUs
End of explanation
"""
# List of non-target OTUs
inFile = os.path.join(workDir, 'comm_nonTarget.txt')
nonTarget = pd.read_csv(inFile, sep='\t')['taxon_name'].tolist()
print 'Number of non-target OTUs: {}'.format(len(nonTarget))
nonTarget[:4]
# loading amplicon fragments from full genome KDE dataset
inFile = os.path.join(workDir, 'ampFrags.pkl')
ampFrag_target = []
with open(inFile, 'rb') as iFH:
ampFrag_target = pickle.load(iFH)
print 'Target OTU richness: {}'.format(len(ampFrag_target))
# loading amplicon fragments from full genome KDE dataset
ampFrag_all = []
with open(allAmpFrags, 'rb') as iFH:
ampFrag_all = pickle.load(iFH)
print 'Count of frag-GC KDEs for all genomes: {}'.format(len(ampFrag_all))
# random selection from the full genome pool
target_richness = len(ampFrag_target)
richness_needed = comm_richness - target_richness
print 'Number of random taxa needed to reach richness: {}'.format(richness_needed)
if richness_needed > 0:
    # sample (with replacement) from all genome KDEs, not just the target subset
    index = range(len(ampFrag_all))
    index = np.random.choice(index, richness_needed)
ampFrag_rand = []
for i in index:
sys.stderr.write('{},'.format(i))
ampFrag_rand.append(copy.deepcopy(ampFrag_all[i]))
else:
ampFrag_rand = []
# renaming randomly selected KDEs by non-target OTU-ID
for i in range(len(ampFrag_rand)):
ampFrag_rand[i][0] = nonTarget[i]
# appending random taxa to target taxa and writing
outFile = os.path.join(workDir, 'ampFrags_wRand.pkl')
with open(outFile, 'wb') as oFH:
x = ampFrag_target + ampFrag_rand
print 'Number of taxa in output: {}'.format(len(x))
pickle.dump(x, oFH)
"""
Explanation: Randomly selecting amplicon fragment length-GC KDEs from total genome pool
End of explanation
"""
!cd $workDir; \
SIPSim fragment_kde \
ampFrags_wRand.pkl \
> ampFrags_wRand_kde.pkl
"""
Explanation: Converting fragments to kde object
End of explanation
"""
!cd $workDir; \
SIPSim diffusion \
ampFrags_wRand_kde.pkl \
--np $nprocs \
> ampFrags_wRand_kde_dif.pkl
"""
Explanation: Adding diffusion
End of explanation
"""
!cd $workDir; \
SIPSim incorpConfigExample \
--percTaxa 0 \
--percIncorpUnif 100 \
> PT0_PI100.config
"""
Explanation: Making an incorp config file
End of explanation
"""
!cd $workDir; \
SIPSim isotope_incorp \
ampFrags_wRand_kde_dif.pkl \
PT0_PI100.config \
--comm comm.txt \
--np $nprocs \
> ampFrags_wRand_kde_dif_incorp.pkl
"""
Explanation: Adding isotope incorporation to BD distribution
End of explanation
"""
!cd $workDir; \
SIPSim BD_shift \
ampFrags_wRand_kde_dif.pkl \
ampFrags_wRand_kde_dif_incorp.pkl \
--np $nprocs \
> ampFrags_wRand_kde_dif_incorp_BD-shift.txt
"""
Explanation: Calculating BD shift from isotope incorporation
End of explanation
"""
!cd $workDir; \
SIPSim gradient_fractions \
comm.txt \
> fracs.txt
"""
Explanation: Simulating gradient fractions
End of explanation
"""
!cd $workDir; \
SIPSim OTU_table \
ampFrags_wRand_kde_dif_incorp.pkl \
comm.txt \
fracs.txt \
--abs 1e9 \
--np $nprocs \
> OTU_abs1e9.txt
"""
Explanation: Simulating an OTU table
End of explanation
"""
%%R -i workDir
setwd(workDir)
# loading file
tbl = read.delim('OTU_abs1e9.txt', sep='\t')
%%R
## BD for G+C of 0 or 100
BD.GCp0 = 0 * 0.098 + 1.66
BD.GCp100 = 1 * 0.098 + 1.66
%%R -w 800 -h 300
# plotting absolute abundances
tbl.s = tbl %>%
group_by(library, BD_mid) %>%
summarize(total_count = sum(count))
## plot
p = ggplot(tbl.s, aes(BD_mid, total_count)) +
geom_area(stat='identity', alpha=0.3, position='dodge') +
geom_histogram(stat='identity') +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16)
)
p
%%R -w 800 -h 300
# plotting number of taxa at each BD
tbl.nt = tbl %>%
filter(count > 0) %>%
group_by(library, BD_mid) %>%
summarize(n_taxa = n())
## plot
p = ggplot(tbl.nt, aes(BD_mid, n_taxa)) +
geom_area(stat='identity', alpha=0.3, position='dodge') +
geom_histogram(stat='identity') +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Number of taxa') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -w 800 -h 250
# plotting relative abundances
## plot
p = ggplot(tbl, aes(BD_mid, count, fill=taxon)) +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p + geom_area(stat='identity', position='dodge', alpha=0.5)
%%R -w 800 -h 250
p + geom_area(stat='identity', position='fill')
"""
Explanation: Plotting taxon abundances
End of explanation
"""
dist,loc,scale = seq_per_fraction
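# Illustrative only: if mean/sigma parameterize the underlying normal (numpy-style
# lognormal convention), the median library size per fraction would be about
# exp(10.096) ~ 24,000 sequences, which falls inside the 10,000-200,000 clipping range
# used below. The actual sampling happens inside `SIPSim OTU_subsample`.
print 'Assumed median sequences per fraction: ~{:.0f}'.format(np.exp(loc))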
!cd $workDir; \
SIPSim OTU_subsample \
--dist $dist \
--dist_params mean:$loc,sigma:$scale \
--walk 2 \
--min_size 10000 \
--max_size 200000 \
OTU_abs1e9.txt \
> OTU_abs1e9_sub.txt
"""
Explanation: Subsampling from the OTU table
End of explanation
"""
%%R -h 300 -i workDir
setwd(workDir)
tbl = read.csv('OTU_abs1e9_sub.txt', sep='\t')
tbl.s = tbl %>%
group_by(library, fraction) %>%
summarize(total_count = sum(count)) %>%
ungroup() %>%
mutate(library = as.character(library))
ggplot(tbl.s, aes(total_count)) +
geom_density(fill='blue')
%%R -h 300 -w 600
setwd(workDir)
tbl.s = tbl %>%
group_by(fraction, BD_min, BD_mid, BD_max) %>%
summarize(total_count = sum(count))
ggplot(tbl.s, aes(BD_mid, total_count)) +
geom_point() +
geom_line() +
labs(x='Buoyant density', y='Total sequences') +
theme_bw() +
theme(
text = element_text(size=16)
)
"""
Explanation: Testing/Plotting seq count distribution of subsampled fraction samples
End of explanation
"""
%%R -i workDir
inFile = paste(c(workDir, 'target_genome_index.txt'), collapse='/')
tbl.target = read.delim(inFile, sep='\t', header=F)
colnames(tbl.target) = c('OTUId', 'genome_file', 'genome_ID', 'X', 'Y', 'Z')
tbl.target = tbl.target %>% distinct(OTUId)
cat('Number of target OTUs: ', tbl.target$OTUId %>% unique %>% length, '\n')
cat('----------\n')
tbl.target %>% head(n=3)
"""
Explanation: Getting list of target taxa
End of explanation
"""
%%R -w 800 -h 250
# plotting relative abundances
tbl = tbl %>%
group_by(fraction) %>%
mutate(rel_abund = count / sum(count))
## plot
p = ggplot(tbl, aes(BD_mid, count, fill=taxon)) +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p + geom_area(stat='identity', position='dodge', alpha=0.5)
%%R -w 800 -h 250
p = ggplot(tbl, aes(BD_mid, rel_abund, fill=taxon)) +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p + geom_area(stat='identity')
"""
Explanation: Plotting abundance distributions
End of explanation
"""
%%R
targets = tbl.target$OTUId %>% as.vector %>% unique
tbl.f = tbl %>%
filter(taxon %in% targets)
tbl.f %>% head
%%R -w 800 -h 250
# plotting absolute abundances
## plot
p = ggplot(tbl.f, aes(BD_mid, count, fill=taxon)) +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p + geom_area(stat='identity', position='dodge', alpha=0.5)
%%R -w 800 -h 250
# plotting relative abundances
p = ggplot(tbl.f, aes(BD_mid, rel_abund, fill=taxon)) +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p + geom_area(stat='identity')
"""
Explanation: Abundance distribution of just target taxa
End of explanation
"""
%%R -i metaDataFile
# loading priming_exp metadata file
meta = read.delim(metaDataFile, sep='\t')
meta %>% head(n=4)
%%R -i otuTableFile
# loading priming_exp OTU table
tbl.otu.true = read.delim(otuTableFile, sep='\t') %>%
select(OTUId, starts_with('X12C.700.14'))
tbl.otu.true %>% head(n=3)
%%R
# editing table
tbl.otu.true.w = tbl.otu.true %>%
gather('sample', 'count', 2:ncol(tbl.otu.true)) %>%
mutate(sample = gsub('^X', '', sample)) %>%
group_by(sample) %>%
mutate(rel_abund = count / sum(count)) %>%
ungroup() %>%
filter(count > 0)
tbl.otu.true.w %>% head(n=5)
%%R
tbl.true.j = inner_join(tbl.otu.true.w, meta, c('sample' = 'Sample'))
tbl.true.j %>% as.data.frame %>% head(n=3)
%%R -w 800 -h 300 -i workDir
# plotting number of taxa at each BD
tbl = read.csv('OTU_abs1e9_sub.txt', sep='\t')
tbl.nt = tbl %>%
filter(count > 0) %>%
group_by(library, BD_mid) %>%
summarize(n_taxa = n())
## plot
p = ggplot(tbl.nt, aes(BD_mid, n_taxa)) +
geom_area(stat='identity', alpha=0.5) +
geom_point() +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Number of taxa') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -w 700 -h 350
tbl.true.j.s = tbl.true.j %>%
filter(count > 0) %>%
group_by(sample, Density) %>%
summarize(n_taxa = sum(count > 0))
ggplot(tbl.true.j.s, aes(Density, n_taxa)) +
geom_area(stat='identity', alpha=0.5) +
geom_point() +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Number of taxa') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
"""
Explanation: Plotting 'true' taxon abundance distribution (from priming exp dataset)
End of explanation
"""
%%R -h 300 -w 600
tbl.true.j.s = tbl.true.j %>%
group_by(sample, Density) %>%
summarize(total_count = sum(count))
ggplot(tbl.true.j.s, aes(Density, total_count)) +
geom_point() +
geom_line() +
labs(x='Buoyant density', y='Total sequences') +
theme_bw() +
theme(
text = element_text(size=16)
)
"""
Explanation: Plotting total counts for each sample
End of explanation
"""
%%R
tbl.true.j.f = tbl.true.j %>%
filter(OTUId %in% targets) %>%
arrange(OTUId, Density) %>%
group_by(sample)
tbl.true.j.f %>% head(n=3) %>% as.data.frame
%%R -w 800 -h 250
# plotting relative abundances
## plot
ggplot(tbl.true.j.f, aes(Density, rel_abund, fill=OTUId)) +
geom_area(stat='identity') +
geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
"""
Explanation: Plotting abundance distribution of target OTUs
End of explanation
"""
%%R
tbl.f.e = tbl.f %>%
mutate(library = 'simulation') %>%
rename('density' = BD_mid) %>%
select(-BD_min, -BD_max)
tbl.true.e = tbl.true.j.f %>%
select('taxon' = OTUId,
'fraction' = sample,
'density' = Density,
count, rel_abund) %>%
mutate(library = 'true')
tbl.sim.true = rbind(tbl.f.e, tbl.true.e) %>% as.data.frame
tbl.f.e = data.frame()
tbl.true.e = data.frame()
tbl.sim.true %>% head(n=3)
%%R
# check
cat('Number of target taxa: ', tbl.sim.true$taxon %>% unique %>% length, '\n')
"""
Explanation: Combining true and simulated OTU tables for target taxa
End of explanation
"""
%%R -w 900 -h 3500
tbl.sim.true.f = tbl.sim.true %>%
ungroup() %>%
filter(density >= 1.6772) %>%
filter(density <= 1.7603) %>%
group_by(taxon) %>%
mutate(mean_rel_abund = mean(rel_abund)) %>%
ungroup()
tbl.sim.true.f$taxon = reorder(tbl.sim.true.f$taxon, -tbl.sim.true.f$mean_rel_abund)
ggplot(tbl.sim.true.f, aes(density, rel_abund, color=library)) +
geom_point() +
geom_line() +
theme_bw() +
facet_wrap(~ taxon, ncol=4, scales='free_y')
"""
Explanation: Abundance distributions of each target taxon
End of explanation
"""
|
lisa-1010/smart-tutor | code/test_drqn.ipynb | mit |
data = d_utils.load_data(filename="../synthetic_data/test-n10000-l3-random.pickle")
dqn_data = d_utils.preprocess_data_for_dqn(data, reward_model="dense")
# Single Trace
print (dqn_data[0])
# First tuple in a trace
s,a,r,sp = dqn_data[0][0]
print (s)
print (a)
print (r)
print (sp)
# Last tuple
s,a,r,sp = dqn_data[0][-1]
print (s)
print (a)
print (r)
print (sp)
dqn_data_train, dqn_data_test = train_test_split(dqn_data, test_size=0.2)
"""
Explanation: Preprocessing Data for DRQN
We take the data from the data generator and save it as traces of (s, a, r, sp) tuples.
Each trajectory corresponds to one trace.
If a trajectory has length n, then its trace will have length n-1 (since we need the next state sp).
End of explanation
"""
model_id = "test_model_drqn"
# Create the model object
model = drqn.DRQNModel(model_id, timesteps=2)
# Initialize trainer object inside the model
model.init_trainer()
# Creating training and validation data
train_buffer = ExperienceBuffer()
train_buffer.buffer = dqn_data_train
train_buffer.buffer_sz = len(train_buffer.buffer)
val_buffer = ExperienceBuffer()
val_buffer.buffer = dqn_data_test
val_buffer.buffer_sz = len(val_buffer.buffer)
# train the model (uses the previously initialized trainer object)
date_time_string = datetime.datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
run_id = "{}".format(date_time_string)
model.train(train_buffer, val_buffer, n_epoch=2,
run_id=run_id, load_checkpoint=True)
# init evaluator of the model
model.init_evaluator()
# Create inputs (states / observations so far) to use for predictions
from drqn import stack_batch
train_batch = train_buffer.sample_in_order(4)
# make sure that batches are over multiple timesteps, should be of shape (batch_sz, n_timesteps, ?)
s_batch_train = stack_batch(train_batch[:, :, 0]) # current states
# Use model to predict next action
actions, q_vals = model.predict(s_batch_train, last_timestep_only=True)
q_vals
actions
# if we want to predict on data with a different number of timesteps than we trained on,
# create a new model, but using the same checkpoint
eval_model = drqn.DRQNModel(model_id, timesteps=10)
eval_model.init_evaluator()
# now the internal RNN will be unrolled over 10 timesteps.
# You can still pass in inputs that have fewer than 10 timesteps, in which case the remaining timesteps will be padded.
eval_model.predict(s_batch_train, last_timestep_only=True)
"""
Explanation: Creating a DRQN model and training it
End of explanation
"""
from drqn_tests import *
n_trajectories = 10
n_concepts = 5
horizon = 6
model_id = "test_model_drqn"
from simple_mdp import create_custom_dependency
dgraph = create_custom_dependency()
test_model = drqn.DRQNModel(model_id=model_id, timesteps=horizon)
test_model.init_evaluator()
learn_prob = 0.15
student = st.Student(n=n_concepts, p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0)
k = test_drqn_single(dgraph, student, horizon, test_model, DEBUG=True)
k
test_drqn_chunk(n_trajectories, dgraph, student, model_id, horizon)
"""
Explanation: Testing the model
End of explanation
"""
test_drqn(model_id=model_id)
"""
Explanation: Final Test Function:
End of explanation
"""
n_concepts = 4
use_student2 = True
student2_str = '2' if use_student2 else ''
learn_prob = 0.15
lp_str = '-lp{}'.format(int(learn_prob*100)) if not use_student2 else ''
n_students = 100000
seqlen = 7
filter_mastery = False
filter_str = '' if not filter_mastery else '-filtered'
policy = 'random'
filename = 'test{}-n{}-l{}{}-{}{}.pickle'.format(student2_str, n_students, seqlen,
lp_str, policy, filter_str)
#concept_tree = sm.create_custom_dependency()
concept_tree = cdg.ConceptDependencyGraph()
concept_tree.init_default_tree(n_concepts)
if not use_student2:
test_student = st.Student(n=n_concepts,p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0)
else:
test_student = st.Student2(n_concepts)
print(filename)
print ("Initializing synthetic data sets...")
dg.generate_data(concept_tree, student=test_student, n_students=n_students, filter_mastery=filter_mastery, seqlen=seqlen, policy=policy, filename="{}{}".format(dg.SYN_DATA_DIR, filename))
print ("Data generation completed. ")
data = d_utils.load_data(filename="../synthetic_data/{}".format(filename))
dqn_data = d_utils.preprocess_data_for_dqn(data, reward_model="dense")
dqn_data_train, dqn_data_test = train_test_split(dqn_data, test_size=0.2)
# Creating training and validation data
train_buffer = ExperienceBuffer()
train_buffer.buffer = dqn_data_train
train_buffer.buffer_sz = len(train_buffer.buffer)
val_buffer = ExperienceBuffer()
val_buffer.buffer = dqn_data_test
val_buffer.buffer_sz = len(val_buffer.buffer)
"""
Explanation: General Workflow
1. Create Data Set
End of explanation
"""
model_id = "test2_model_drqn_mid"
model = drqn.DRQNModel(model_id, timesteps=seqlen-1)
model.init_trainer()
# train the model (uses the previously initialized trainer object)
date_time_string = datetime.datetime.now().strftime("%m-%d-%Y_%H-%M-%S")
run_id = "{}".format(date_time_string)
model.train(train_buffer, val_buffer, n_epoch=32,
run_id=run_id, load_checkpoint=True)
"""
Explanation: 2. Create Model and Train
End of explanation
"""
test_drqn(model_id=model_id)
"""
Explanation: 3. Test Model in "real world" and calculate post test scores
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mohc/cmip6/models/hadgem3-gc31-hh/ocean.ipynb | gpl-3.0 |
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-hh', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: MOHC
Source ID: HADGEM3-GC31-HH
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:14
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z* vertical coordinate in ocean?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momentum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momentum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momentum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different from that of active tracers? If so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momentum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momentum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momentum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momentum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momentum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momentum scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusivity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusivity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusivity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusivity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusivity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusivity coeff in lateral physics tracers scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cell mixing in the upper ocean?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Properties of boundary layer (BL) mixing on tracers in the ocean
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specify the order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specify the coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient (scheme and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Properties of boundary layer (BL) mixing on momentum in the ocean
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specify the order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specify the coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient (scheme and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
Properties of interior mixing in the ocean
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
Properties of interior mixing on tracers in the ocean
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specify the coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient (scheme and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
Properties of interior mixing on momentum in the ocean
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specify the coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient (scheme and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embedded in the ocean model (instead of levitating)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinction depths for the sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
ecabreragranado/OpticaFisicaII
|
Trabajo Filtro Interferencial/.ipynb_checkpoints/TrabajoFiltrosweb-checkpoint.ipynb
|
gpl-3.0
|
from IPython.core.display import Image
Image("http://upload.wikimedia.org/wikipedia/commons/thumb/2/28/IEC60825_MPE_W_s.png/640px-IEC60825_MPE_W_s.png")
"""
Explanation: PROPOSED PROJECT ON INTERFERENCE FILTERS
Consult the user manual for the interactive notebooks, which is available on the Campus Virtual.
Working group
Group members (edit the text in this cell):
Juan Antonio Fernández
Alberto Pérez
Juan
Include the e-mail addresses.
Introduction
The project consists of finding a commercial interference filter that can protect the eye from the visible radiation
of a high-power laser pointer. The project is divided into the following tasks:
Task 1. Maximum permissible exposure (MPE)
The maximum permissible exposure (MPE) is the highest power or energy density
(W/cm$^2$ or J/cm$^2$) of a light beam that can reach the human eye without causing damage.
The MPE is measured at the cornea, and it depends on the wavelength of the radiation and on the exposure time.
The following figure shows the MPE at the cornea (in irradiance units, W/cm$^2$) as a function
of the exposure time for different ranges of the electromagnetic spectrum.
Figure from http://en.wikipedia.org/wiki/Laser_safety
End of explanation
"""
####
# Parameters to modify. START
####
web_laser = 'http://www.punterolaser.com' # Enter the address of the web page
web_anchura = '1100' # Width in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
web_altura = '800' # Height in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
####
# Parameters to modify. END
####
##############################################################################################################################
texto_web_laser='<iframe src= '+web_laser+' width='+web_anchura+'px, height='+web_altura+'px>'
from IPython.display import HTML
HTML(texto_web_laser)
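# --- Illustrative sketch, not part of the original assignment template ---
# Worked example of the Task 1(b) calculation described in the explanation below.
# The MPE irradiance (read off the MPE figure shown earlier for a blink time of
# about 0.25 s) and the pupil diameter are placeholder assumptions; each group
# should replace them with its own values.
from math import pi
irradiancia_MPE = 2.5e-3 # assumed maximum permissible irradiance (W/cm^2)
diametro_pupila_cm = 0.7 # assumed pupil diameter: 7 mm expressed in cm
area_pupila = pi * (diametro_pupila_cm / 2.0)**2 # pupil area (cm^2)
potencia_maxima_mW = irradiancia_MPE * area_pupila * 1e3 # maximum permissible power (mW)
print("Maximum permissible power (mW): %.2f" % potencia_maxima_mW)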
"""
Explanation: Task 1 (a). Maximum irradiance
Since we are considering the beam of a laser pointer that emits in the visible range,
we will use the time it takes to close the eyelid as the exposure time. With this
exposure time, estimate from the graph the maximum irradiance that can reach the eye.
Write down the exposure time used and the corresponding irradiance value.
Exposure time (blink) = s
Maximum permissible irradiance = W/cm$^2$
Task 1 (b). Maximum power
We will assume that the beam reaching our eye is collimated, with a size equal to
that of our pupil. Using this size, calculate the maximum power that can reach our
eye without causing damage (an illustrative worked sketch is included in the code just above this explanation).
Write down the pupil size considered, the calculations, and the final result for the power (in mW).
Pupil diameter or radius = mm
Intermediate calculations
Maximum permissible power = mW
Task 2. Choice of the laser pointer
Search the internet for information about a high-power visible laser pointer.
Check that this laser pointer can cause eye damage (taking into account the result of Task 1 (b)).
Write here the technical characteristics of that laser (power,
wavelength, etc.) and its price. Include a reference to the
web page http:\
We will embed the web page used in the notebook. To do so, we write the address of the web page in the next code cell.
End of explanation
"""
####
# Parameters to modify. START
####
web_filtro = 'http://www.semrock.com/FilterDetails.aspx?id=LP02-224R-25' # Enter the address of the web page
web_anchura = '1100' # Width in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
web_altura = '800' # Height in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
####
# Parameters to modify. END
####
##############################################################################################################################
texto_web_filtro='<iframe src= '+web_filtro+' width='+web_anchura+'px, height='+web_altura+'px>'
from IPython.display import HTML
HTML(texto_web_filtro)
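# --- Illustrative sketch, not part of the original assignment template ---
# Relation between the optical density (OD) quoted in the filter datasheet and
# the transmittance T at the blocked wavelength: T = 10**(-OD). The OD value
# below is a placeholder; take it from the filter selected in Task 3(a).
OD_filtro = 6.0 # assumed optical density at the laser wavelength
T_filtro = 10.0**(-OD_filtro) # corresponding transmittance
print("Transmittance for OD = %g: %.1e" % (OD_filtro, T_filtro))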
"""
Explanation: Task 3. Choice of the interference filter
We will look for a commercial interference filter on the internet that avoids the
risk of eye damage for the selected laser pointer. It must be a filter that blocks
the wavelength of the laser pointer.
Task 3 (a). Search for and information about the interference filter
We will use the information available from the company Semrock ( http://www.semrock.com/filters.aspx )
Select a suitable filter on that web page. Click on each filter (on the transmittance curve,
on the Part Number, or on Show Product Detail) to obtain more information. Write here
the most relevant characteristics of the selected filter: transmittance T, absorbance or optical density OD,
wavelength range, price, etc. (an illustrative sketch relating OD and T is included in the code just above this explanation).
We will embed the web page with the detailed information of the selected filter in the notebook. To do so,
we write the address of that web page in the next code cell.
End of explanation
"""
%pylab inline
####
# Parameters to modify. START
####
longitud_de_onda_laser = 530 # Enter the wavelength of the selected laser pointer (in nm)
# We plot the original curve and a zoomed view over the following range of values (to see the region of interest better)
longitud_de_onda_minina = 500 # Enter the minimum wavelength (in nm) for the zoom
longitud_de_onda_maxima = 670 # Enter the maximum wavelength (in nm) for the zoom
transmitancia_minina = 1e-8 # Enter the minimum transmittance for the zoom
transmitancia_maxima = 1 # Enter the maximum transmittance for the zoom
####
# Parameters to modify. END
####
##############################################################################################################################
from numpy.core.defchararray import find
indice_igual=find(web_filtro,'=')
codigoID = web_filtro[indice_igual+1:-3]
Codigo_Filtro = codigoID
filename = 'http://www.semrock.com/_ProductData/Spectra/'+Codigo_Filtro+'_Spectrum.txt' # Address of the data file
data=genfromtxt(filename,dtype=float,skip_header=4) # Load the data
longitud_de_onda=data[:,0];transmitancia=data[:,1];
figure(figsize=(13,6))
subplot(1,2,1)
semilogy(longitud_de_onda,transmitancia)
xlabel('$\lambda$ (nm)');ylabel('T');title('Original curve')
subplot(1,2,2)
semilogy(longitud_de_onda,transmitancia)
xlabel('$\lambda$ (nm)');ylabel('T');title('Zoom')
axis([longitud_de_onda_minina, longitud_de_onda_maxima, transmitancia_minina, transmitancia_maxima]);
from scipy.interpolate import interp1d
f_transm = interp1d(data[:,0],data[:,1])
transm_para_laser = f_transm(longitud_de_onda_laser)
print("Transmittance at the wavelength of the laser pointer")
print(transm_para_laser)
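# --- Illustrative sketch, not part of the original assignment template ---
# Verification for Task 3(b): the power transmitted through the filter must stay
# below the maximum permissible power obtained in Task 1(b). The laser power and
# the permissible power below are placeholder assumptions, not measured values.
potencia_puntero_mW = 200.0 # assumed laser pointer power (mW), from Task 2
potencia_permisible_mW = 1.0 # assumed maximum permissible power (mW), from Task 1(b)
potencia_transmitida_mW = potencia_puntero_mW * transm_para_laser # power reaching the eye (mW)
print("Transmitted power (mW): %.2e" % potencia_transmitida_mW)
print("The filter avoids the risk of injury: %s" % (potencia_transmitida_mW < potencia_permisible_mW))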
"""
Explanation: Task 3 (b). Verification of the filter
Using the transmittance (T) at the wavelength of the laser pointer, check that this
filter will avoid the risk of injury (an illustrative verification sketch is included in the code just above this explanation).
To do so, we will use the transmittance data of the selected filter that appear on the
Semrock web page. To load these data into the notebook, we use the code that identifies
the filter, which is obtained automatically from the address of the web page selected in
the previous section (Task 3(a)).
The following code cell plots the transmittance of the filter on a logarithmic scale as a
function of the wavelength (in nm). The original curve obtained from Semrock is shown on the
left, and a zoom of it over the region we choose is shown on the right.
This graph allows us to obtain the value of the transmittance at the wavelength of our
laser pointer; to do so, we must enter the value of that wavelength in the code, and the
code will compute the transmittance. The result appears above the graphs.
End of explanation
"""
|
cing/rapwords
|
RapWordsTalk.ipynb
|
mit
|
import pandas as pd
import numpy as np
import glob
import re
from collections import defaultdict
"""
Explanation: Word! Automating a Hip-hop word of the day blog
Chris Ing, @jsci http://rapwords.tumblr.com (Soon: https://github.com/cing/rapwords/)
Requirements
standard library (re, glob, collections, html)
pandas (http://pandas.pydata.org/), numpy
wiktionaryparser (https://github.com/Suyash458/WiktionaryParser)
spotipy (https://github.com/plamere/spotipy) / Google Data API (https://github.com/google/google-api-python-client)
nltk (https://github.com/nltk/nltk)
pypronouncing (https://github.com/aparrish/pronouncingpy)
pytumblr (Python3 fork) (https://github.com/jabbalaci/pytumblr) / oauthlib / oauthlib_requests
End of explanation
"""
from html.parser import HTMLParser
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.strict = False
self.convert_charrefs= True
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
df_data = defaultdict(list)
for filename in glob.iglob('Lyrics/ohhla.com/*/*/*/*.txt', recursive=True):
with open(filename, 'r', encoding = "ISO-8859-1") as f:
stripped_lyrics = strip_tags(f.read())
artist = re.search('Artist:\s*(.*)\s*\n', stripped_lyrics)
song = re.search('Song:\s*(.*)\s*\n', stripped_lyrics)
lyrics = re.search('Typed by:\s*(.*)\s*\n([\s\S]*)', stripped_lyrics)
if artist is not None and song is not None and lyrics is not None:
df_data["filename"].append(filename)
df_data["artist"].append(artist.group(1))
df_data["song"].append(song.group(1))
df_data["lyrics"].append(lyrics.group(2).lower()) # group(1) is the transcriber
rap_data = pd.DataFrame(df_data)
rap_data.iloc[105:120]
rap_data.shape
"""
Explanation: Loading Lyrics into Memory
End of explanation
"""
for artist, song, lyrics in zip(df_data["artist"],
df_data["song"],
df_data["lyrics"]):
if "python" in set(lyrics.split()):
print(artist, " - ", song)
for artist, song, lyrics in zip(df_data["artist"],
df_data["song"],
df_data["lyrics"]):
if "anaconda" in set(lyrics.split()):
print(artist, " - ", song)
"""
Explanation: Checking Words
End of explanation
"""
# The 1/3 million most frequent words, all lowercase, with counts.
# http://norvig.com/ngrams/
ngrams=pd.read_csv("assets/count_1w.txt",sep="\t",names=["word","count"])
ngrams.tail(n=20)
# The Tournament Word List (178,690 words) -- used by North American Scrabble players.
# http://norvig.com/ngrams/
twl_wordlist=pd.read_csv("assets/TWL06.txt",names=["word"])
twl_wordlist=pd.DataFrame(twl_wordlist["word"].str.lower())
twl_w_ngrams = pd.merge(twl_wordlist, ngrams)
twl_w_ngrams.sort_values(by="count").head(n=20)
np.sum(twl_w_ngrams["count"] < 400000)
all_bigwords = twl_w_ngrams[twl_w_ngrams["count"] < 400000]["word"].values
all_big_bigwords = [word for word in all_bigwords if len(word) > 4]
bigset = set(all_big_bigwords)
print(all_big_bigwords[:100])
df_data["matches"] = []
for lyrics in df_data["lyrics"]:
found_words = set(lyrics.split()) & bigset
df_data["matches"].append(found_words)
rap_data = pd.DataFrame(df_data)
rap_data = rap_data[rap_data.matches != set()]
rap_data.head(n=20)
"""
Explanation: Finding Rare Words
End of explanation
"""
from collections import Counter
rap_onegrams = Counter()
for lyrics in df_data["lyrics"]:
rap_onegrams.update(lyrics.split())
rap_onegrams_df = pd.DataFrame.from_dict(rap_onegrams, orient='index').reset_index()
rap_onegrams_df.columns = ["word","count"]
rap_onegrams_df.sort_values(by="count").tail(n=20)
raptwl_df = pd.merge(twl_wordlist, rap_onegrams_df, on="word")
raptwl_df.head()
all_bigrapwords = set(raptwl_df[raptwl_df["count"] < 5]["word"].values)
len(all_bigrapwords)
df_data["rap_matches"] = []
for lyrics in df_data["lyrics"]:
found_words = set(lyrics.split()) & all_bigrapwords
df_data["rap_matches"].append(found_words)
rap_data = pd.DataFrame(df_data)
rap_data = rap_data[rap_data.rap_matches != set()]
rap_data
#rap_data[rap_data["artist"].str.startswith("Kanye")]
#rap_data[rap_data["artist"].str.startswith("Wu-Tang") & rap_data["filename"].str.contains("enter")]
rap_data[rap_data["artist"].str.startswith("Nas") & rap_data["filename"].str.contains("illmatic")]
# Search and replace 23375 to 1173 in the lyrics subset on Github!
print(rap_data.loc[23375]["matches"])
print(rap_data.loc[23375]["rap_matches"])
word = "trifle"
"""
Explanation: Finding Rare Rap Words
End of explanation
"""
import spotipy
sp = spotipy.Spotify()
def test_track_search(sp, search_str):
results = sp.search(q=search_str, type='track', limit=1)
if len(results['tracks']['items']) > 0:
print(results['tracks']['items'][0]['artists'][0]['name']," - ",
results['tracks']['items'][0]['name'],
results['tracks']['items'][0]['popularity'],)
else:
print("")
test_track_search(sp, 'Nas Memory Lane')
"""
Explanation: Finding 'Good' Songs
End of explanation
"""
# Activate Google Data API
DEVELOPER_KEY = ""
from apiclient.discovery import build
from datetime import datetime
def youtube_search(q):
youtube = build("youtube", "v3", developerKey=DEVELOPER_KEY)
search_response = youtube.search().list(
q=q, type="video",
part="id,snippet", maxResults=1
).execute()
for search_result in search_response.get("items", []):
if search_result is not None:
video_id = search_result["id"]["videoId"]
date_posted = search_result["snippet"]["publishedAt"]
results = youtube.videos().list(
part="statistics", id=video_id
).execute()
return (video_id,
float(results["items"][0]["statistics"]["viewCount"]),
float((datetime.now()-datetime.strptime(date_posted, "%Y-%m-%dT%H:%M:%S.000Z")).days))
youtube_data = youtube_search("Nas Memory Lane")
print(youtube_data)
from IPython.display import HTML
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/JXBFG2vsyCM?rel=0&controls=0&showinfo=0" frameborder="0" allowfullscreen></iframe>')
"""
Explanation: Finding 'Good' Songs with YouTube
End of explanation
"""
from wiktionaryparser import WiktionaryParser
def get_definition(word):
parser = WiktionaryParser()
worddef = parser.fetch(word)
possible_defs = []
for entymologies in worddef:
for dd in entymologies["definitions"]:
all_defs = re.sub(word+"\s*\u200e","",
dd['text']).strip().split("\n")
all_gdefs = [d for d in all_defs if re.match("^\(.*\)$",d) == None]
possible_defs.append((dd['partOfSpeech'],
all_gdefs))
return possible_defs
get_definition("racket")
get_definition("trifle")
"""
Explanation: Getting Definitions
End of explanation
"""
lyrics = rap_data.loc[23375]["lyrics"]
rap_sentence = [line for line in lyrics.split("\n") if word in line][0]
print(rap_sentence)
import nltk
split_sentence = nltk.word_tokenize(rap_sentence)
tagged_sentence = nltk.pos_tag(split_sentence,tagset="universal")
print(tagged_sentence)
"""
Explanation: Get Definitions from Context
End of explanation
"""
rap_sentence = "word to christ, a disciple of streets, I trifle on beats"
split_sentence = nltk.word_tokenize(rap_sentence)
tagged_sentence = nltk.pos_tag(split_sentence,tagset="universal")
print(tagged_sentence)
def get_definition_with_sentence(word, rap_sentence):
split_sentence = nltk.word_tokenize(rap_sentence)
tagged_sentence = nltk.pos_tag(split_sentence,
tagset="universal")
index_of_word = split_sentence.index(word)
pos_of_word = tagged_sentence[index_of_word][1].lower()
parser = WiktionaryParser()
worddef = parser.fetch(word)
possible_defs = []
for entymologies in worddef:
for dd in entymologies["definitions"]:
part_of_speech = dd['partOfSpeech']
all_defs = re.sub(word+"\s*\u200e","",
dd['text']).strip().split("\n")
all_gdefs = [d for d in all_defs if re.match("^\(.*\)$",d) == None]
# Take the first definition that matches part of speech
if part_of_speech == pos_of_word:
return (dd['partOfSpeech'], all_gdefs)
return ("N/A","N/A")
get_definition_with_sentence("trifle", rap_sentence)
def get_definition_with_lyrics(word, lyrics):
rap_sentence = [line for line in lyrics.split("\n") if word in line][0]
return get_definition_with_sentence(word, rap_sentence)
"""
Explanation: Universal Part of Speech Tags
VERB - verbs (all tenses and modes)
NOUN - nouns (common and proper)
PRON - pronouns
ADJ - adjectives
ADV - adverbs
ADP - adpositions (prepositions and postpositions)
CONJ - conjunctions
DET - determiners
NUM - cardinal numbers
PRT - particles or other function words
X - other: foreign words, typos, abbreviations
End of explanation
"""
lyrics_split=["Twinkle, twinkle, little star",
"How I wonder what you are",
"Up above the world so high",
"Like a diamond in the sky"]
def get_rhymegroup(target_word, lyrics_split):
group_index_of_target = -1
for groupid, line in enumerate(lyrics_split):
if target_word in line:
group_index_of_target = groupid
if group_index_of_target > 0 and group_index_of_target < len(lyrics_split)-1:
return lyrics_split[group_index_of_target-1:group_index_of_target+2]
else:
return "N/A"
get_rhymegroup("high", lyrics_split)
"""
Explanation: Extract Surrounding Lines
End of explanation
"""
import pronouncing
pronouncing.rhymes("star")[0:20]
print(pronouncing.phones_for_word("high"))
print(pronouncing.phones_for_word("sky"))
print(pronouncing.phones_for_word("orange"))
print(pronouncing.phones_for_word("hinge"))
def rhymes_per_line(lyrics_split):
rhymes = []
for line in lyrics_split:
words = line.strip().split()
last_word = words[-1].strip('.,?!;:')
last_word_p = pronouncing.phones_for_word(last_word)
if len(last_word_p) > 0:
rhymes.append((pronouncing.rhyming_part(last_word_p[0]),line))
return rhymes
rhymes_per_line(lyrics_split)
"""
Explanation: Extract Rhyming Couplet
End of explanation
"""
lyrics_split = [line for line in df_data["lyrics"][23375].split("\n") if len(line) > 0]
print(lyrics_split[10:-30])
target_word = "trifle"
def get_rhymes(lyrics_split):
lines_with_rhyming_parts = list()
for line in lyrics_split:
words = line.split()
last_word = words[-1].strip('.,?!;:') # .strip() to remove any punctuation
last_word_p = pronouncing.phones_for_word(last_word)
if len(last_word_p) > 0:
if len(last_word_p) > 1:
last_word_p = [last_word_p[0],]
for phones in last_word_p:
rhyming_part = pronouncing.rhyming_part(phones)
line_with_part = [rhyming_part[:2], line]
#print(line_with_part)
lines_with_rhyming_parts.append(line_with_part)
else:
line_with_part = ["N/A", line]
lines_with_rhyming_parts.append(line_with_part)
return lines_with_rhyming_parts
rhyme_lines = get_rhymes(lyrics_split)
rhyme_lines[40:60]
import itertools
import operator
grouped_rhymes = []
for key,group in itertools.groupby(rhyme_lines, operator.itemgetter(0)):
merged_group = [g[1] for g in group]
grouped_rhymes.append(list(merged_group))
grouped_rhymes[15:35]
def rhymegroup(target_word, grouped_rhymes):
group_index_of_target = -1
for groupid, rhymes in enumerate(grouped_rhymes):
#print(groupid, rhymes)
for line in rhymes:
if target_word in line:
group_index_of_target = groupid
if group_index_of_target != -1:
return grouped_rhymes[group_index_of_target]
else:
return "N/A"
rhymegroup("trifle", grouped_rhymes)
def rhymegroup_from_word(word, lyrics):
    lyrics_split = [line for line in lyrics.split("\n") if len(line) > 0]
    rhyme_lines = get_rhymes(lyrics_split)  # regroup from this song's lyrics rather than the module-level rhyme_lines
    grouped_rhymes = []
    for key, group in itertools.groupby(rhyme_lines, operator.itemgetter(0)):
        merged_group = [g[1] for g in group]
        grouped_rhymes.append(list(merged_group))
    return rhymegroup(word, grouped_rhymes)
"""
Explanation:
End of explanation
"""
# Get API key, https://www.tumblr.com/docs/en/api/v2 and do OATHv1
tumblr_client = ''
tumblr_secret = ''
access_key = ''
access_secret = ''
import pytumblr
user = pytumblr.TumblrRestClient(
tumblr_client, tumblr_secret,
access_key, access_secret)
def post_template(word, part_of_speech, worddef, lyrics, artist, song, youtube=None):
post = '''<p><a href="http://en.wiktionary.org/wiki/{}">{}</a> '''.format(word, word)
post += '''- {} -\xa0 {}</p>'''.format(part_of_speech, worddef)
post += "<p>"
for line in lyrics:
if word in line:
post += line.replace(word,"<b>"+word+"</b>")+"<br />"
else:
post += line+"<br />"
post += "</p>"
if youtube is not None:
post += '''<p>-{} on\xa0“'''.format(artist)
post += '''<a href="https://www.youtube.com/watch?v={}">{}</a>”</p>'''.format(youtube,
song)
else:
post += '''<p>-{} on\xa0“{}”</p>'''.format(artist,song)
return post
"""
Explanation: Posting on Tumblr
End of explanation
"""
print(rap_data.loc[23375])
word = "trifle"
artist = rap_data.loc[23375].artist
song = rap_data.loc[23375].song
part_of_speech, worddef = get_definition_with_lyrics(word,
rap_data.loc[23375].lyrics)
lyrics = rhymegroup_from_word(word, rap_data.loc[23375].lyrics)
youtube = youtube_search(artist + " " + song)[0]
slug = word+"-"+part_of_speech+"-"+artist
#print(word, part_of_speech, worddef[0], lyrics, artist, song, youtube)
post_body = post_template(word, part_of_speech, worddef[0], lyrics, artist, song, youtube)
user.create_text("rapwords",
format="html",
state="published",
slug=slug,
body=post_body,
tags=[part_of_speech],)
"""
Explanation: Puttin' it Together
End of explanation
"""
|
rfinn/LCS
|
notebooks/LCS-MS-Diagnostic-Plots.ipynb
|
gpl-3.0
|
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
"""
Explanation: Making some plots:
NUV-M24 vs R24/Rd
R24 vs 24um Sersic index
Main sequence plot on full LIR sample
But first, import some modules...
End of explanation
"""
%run ~/github/LCS/python/Python3/LCS_MS_rf_plots.py
g.plotNUV24_vs_sizeratio()
"""
Explanation: NUV-M24 vs R24/Rd
End of explanation
"""
%run ~/github/LCS/python/Python3/LCS_MS_rf_plots.py
g.plotn24_vs_R24()
"""
Explanation: Why is this important?
I think I wanted to confirm that galaxies with smaller R24/Rd were more obscured. This plot confirms that statement.
We are plotting all galaxies in the galfit sample.
are NUV and M24 correlated?
Sersic Index @24um vs R_{24}
End of explanation
"""
|
yashdeeph709/Algorithms
|
PythonBootCamp/Complete-Python-Bootcamp-master/Files.ipynb
|
apache-2.0
|
%%writefile test.txt
Hello, this is a quick test file
"""
Explanation: Files
Python uses file objects to interact with external files on your computer. These file objects can be any sort of file you have on your computer, whether it be an audio file, a text file, emails, Excel documents, etc. Note: You will probably need to install certain libraries or modules to interact with those various file types, but they are easily available. (We will cover downloading modules later on in the course).
Python has a built-in open function that allows us to open and play with basic file types. First we will need a file though. We're going to use some iPython magic to create a text file!
iPython Writing a File
End of explanation
"""
# Open the test.txt we made earlier
my_file = open('test.txt')
# We can now read the file
my_file.read()
# But what happens if we try to read it again?
my_file.read()
"""
Explanation: Python Opening a file
We can open a file with the open() function. The open function also takes in arguments (also called parameters). Let's see how this is used:
End of explanation
"""
# Seek to the start of file (index 0)
my_file.seek(0)
# Now read again
my_file.read()
"""
Explanation: This happens because you can imagine the reading "cursor" is at the end of the file after having read it. So there is nothing left to read. We can reset the "cursor" like this:
End of explanation
"""
# Readlines returns a list of the lines in the file.
my_file.readlines()
"""
Explanation: In order to not have to reset every time, we can also use the readlines method. Use caution with large files, since everything will be held in memory. We will learn how to iterate over large files later in the course.
End of explanation
"""
# Add a second argument to the function, 'w' which stands for write
my_file = open('test.txt','w+')
# Write to the file
my_file.write('This is a new line')
# Read the file
my_file.read()
"""
Explanation: Writing to a File
By default, the open() function only allows us to read the file; we need to pass the argument 'w' to write over the file. For example:
End of explanation
"""
%%writefile test.txt
First Line
Second Line
"""
Explanation: Iterating through a File
Let's get a quick preview of a for loop by iterating over a text file. First let's make a new text file with some iPython Magic:
End of explanation
"""
for line in open('test.txt'):
print line
"""
Explanation: Now we can use a little bit of flow control to tell the program to go through every line of the file and do something:
End of explanation
"""
# Pertaining to the first point above
for asdf in open('test.txt'):
print asdf
"""
Explanation: Don't worry about fully understanding this yet; for loops are coming up soon. But we'll break down what we did above. We said that for every line in this text file, go ahead and print that line. It's important to note a few things here:
1.) We could have called the 'line' object anything (see example below).
2.) By not calling .read() on the file, the whole text file was not stored in memory.
3.) Notice the indent on the second line for print. This whitespace is required in Python.
We'll learn a lot more about this later, but up next: Sets and Booleans!
End of explanation
"""
|
cfcdavidchan/Deep-Learning-Foundation-Nanodegree
|
intro-to-tensorflow/intro_to_tensorflow.ipynb
|
mit
|
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
a = 0.1
b = 0.9
Xmin = 0
Xmax = 255
Xrange = Xmax - Xmin
return a + (((image_data - Xmin)*(b-a))/Xrange)
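# Quick, optional spot check of the scaling above: 0 should map to 0.1, 255 to 0.9,
# and a mid-gray value should land near the middle of that range.
print(normalize_grayscale(np.array([0, 128, 255])))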
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
"""
Explanation: <img src="image/Mean_Variance_Image.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize_grayscale() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
biases = tf.Variable(tf.zeros(labels_count))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: Problem 2
Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.
<img src="image/network_diagram.png" style="height: 40%;width: 40%; position: relative; right: 10%">
For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict which letter (A-J) the image shows, so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network.
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 100
learning_rate = 0.01
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/Learn_Rate_Tune_Image.png" style="height: 70%;width: 70%">
Problem 3
Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Learning Rate:
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
Configuration 2
* Epochs:
* 1
* 2
* 3
* 4
* 5
* Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Test
You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
End of explanation
"""
|
eatingcrispr/VirtualEating
|
archive/Simulating and generating 3MB Xenopus library/VirtualEating_AsInDevCell.ipynb
|
apache-2.0
|
import Bio
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqIO
from Bio.Blast import NCBIXML
from Bio import Restriction
from Bio.Restriction import *
from Bio.Alphabet.IUPAC import IUPACAmbiguousDNA
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import cPickle as pickle
import subprocess
import matplotlib
from eating import *
import multiprocessing as mp
from operator import itemgetter, attrgetter, methodcaller
import numpy
%pylab inline
"""
Explanation: VirtualEATING
Andrew Lane, University of California, Berkeley
Overview
CRISPR-EATING is a molecular biology protocol to generate libraries of CRISPR guide RNAs. The use of this approach to generate a library suitable for chromosomal locus imaging requires ways to avoid regions that will be processed into non-specific guides, which (in part) is what these scripts are designed to achieve.
These scripts contain a set of functions that are assembled into a workflow to:
- Predict the sgRNA spacers produced when a particular substrate DNA is subjected to the EATING protocol described in Lane et al., Dev. Cell (2015).
- Score those predicted guides for specificity against a genome, using a BLAST database built from that genome and an implementation of the CRISPR guide scoring algorithm described in Hsu et al (2013).
- Using the score information, pick out sub-regions within the substrate DNA that will produce clusters of high-specificity guides and design PCR primers to amplify only those regions.
Following the generation of suitable PCR primers from this tool, the "wet" portion of the protocol is as follows:
1. The output PCR primers (144 pairs in 144 separate reactions in the case of the labeled 3MB region) are used to amplify from the substrate DNA.
2. The resulting products are pooled and subjected to the EATING molecular biology protocol.
3. When complexed to dCas9-mNeonGreen (or other fluorescent protein), the resulting library can be used to image your desired locus.
Prerequisites
Some experience with Python and the very basics of Biopython and BLAST
A Python installation with biopython, pickle
A BLAST database generated from the genome against which you would like to score your guides and a working BLAST installation. To generate the BLAST database, use a FASTA file containing your genome of interest. For example, LAEVIS_7.1.repeatmasked.fa. Use the following syntax to generate the BLAST DB. (The -parse_seqids flag is critical; the guide scoring algorithm expects a database generated using this flag).
makeblastdb -in LAEVIS_7.1.repeatmasked.fa -dbtype nucl -parse_seqids -out xl71 -title 'xl71'
This was tested using makeblastdb version 2.2.29+. Perform a test BLAST query on your database to check that your installation can find it.
The original FASTA file used to make the BLAST database must also be available; this is necessary so that it can be determined whether a guide BLAST database hit is adjacent to a PAM and therefore relevant for score determination. The entire genome is loaded entirely into memory in the current implementation and thus you need a computer with enough RAM (8-16GB) for this. (Future updates may remove this requirement)
References:
Hsu PD, Scott DA, Weinstein JA, Ran FA, Konermann S, Agarwala V, et al. DNA targeting specificity of RNA-guided Cas9 nucleases. Nat Biotechnol. Nature Publishing Group; 2013;31: 827–832. doi:10.1038/nbt.2647
Using this notebook and adapting it for a particular purpose
The basic EATING-related logic is in the eating module (eating.py). This module contains functions (prefixed with "al_") to predict the guides that will be generated from an input DNA sequence and score the guides.
Import modules
End of explanation
"""
fasta_file = SeqIO.parse("../../Genomic Data/LAEVIS_7.1.repeatMasked.fa", "fasta")
"""
Explanation: Set up input files
1. The FASTA file to be scored
End of explanation
"""
handle = open("../../Genomic Data/LAEVIS_7.1.repeatMasked.fa", 'rb')
xl71genome = SeqIO.parse(handle, "fasta", alphabet=IUPACAmbiguousDNA())
xl71genomedict = {}
for item in xl71genome:
xl71genomedict[item.id] = item
len(xl71genomedict)
"""
Explanation: 2. The genome against which generated guides are scored
See Prerequisites above for explanation.
End of explanation
"""
longest = 0
for item in fasta_file:
if len(item) > longest:
longest = len(item)
longscaffold = [item]
print(longscaffold[0].name + " is the longest scaffold at " "{:,}".format(len(longscaffold[0])) + " bp in length.")
"""
Explanation: Begin custom processing
The FASTA file we've loaded (fasta_file) contains the entire X. laevis genome. The X. laevis genome hasn't yet been definitively assembled into physical chromosomes - instead, it's a large number of contigs or "scaffolds". For the purposes of making a library that labels a single region, we want to work with a big piece that we know is contiguous. So, we find the longest "scaffold".
End of explanation
"""
cutslist = al_digesttarget(longscaffold)
"""
Explanation: Next, we want to digest this scaffold into guides. This uses the al_digesttarget function contained in eating.py to produce a generator of candidate guide records, which are scored in the following steps.
End of explanation
"""
[item for item in al_digesttarget([longscaffold[0][0:1500]])]
"""
Explanation: In this version of the script, the output from al_digesttarget isn't especially readable, but for reference:
Each item is a SeqRecord (see BioPython docs)
The Sequence is the guide 20mer, written from 5' to 3'
The ID is the cut-fragment of DNA of that an enzyme produces, counting from the left (i.e. the most 5' guide has an id of 1) and the strand that the guide is found on (F or R, where F is forward with respect to the input DNA), starting with all the HpaII cuts, then all the BfaI cuts, then all the ScrFI cuts. Note that the script predicts the results when each digestion is done in a separate tube, rather than when all enzymes are used as a mixture (which would kill some guides where cut sites of two different enzymes are <20 bp apart).
The name is the sequence position of the left edge of the guide along the input DNA. For forward-direction guides, this is position of the 5' end of the guide. For reverse, it's position of the 3' end of the guide.
The description is the enzyme that generates the guide's cut site.
In this example (the most 5' 1500 bp of the chosen Scaffold), HpaII does not cut. Note that enzyme recognition sites are palindromic and thus recognizes a palindromic sequence containing a PAM on both strands. This results in a guide being generated on both sides of the cut site.
End of explanation
"""
def multiscore_pool(x):
score = al_scoreguide(x, "xl71", xl71genomedict)
return (score[0], score[1])
# Parallel scoring approach adapted from http://sebastianraschka.com/Articles/2014_multiprocessing_intro.html#An-introduction-to-parallel-programming-using-Python's-multiprocessing-module
pool = mp.Pool(processes=2)
results = [pool.apply(multiscore_pool, args=(x,)) for x in cutslist]
pickle.dump(results, open( "finalpicklescores.pkl", "wb" ))
pool.close()
"""
Explanation: Next, we'd like to take the 20mers extracted and score them against the entire Xenopus laevis genome. These lines score each guide variable region for specificity using the xl71 BLAST database and the xl71genomedict dict.
End of explanation
"""
results[0:20]
"""
Explanation: The format of the resulting data is (score, guide).
End of explanation
"""
import copy
a = []
for (score, details) in results:
a.append(int(details.name)) # The guide's name attribute contains its position in bp
resultssorted = zip(results, a)
resultssorted = sorted(resultssorted, key=itemgetter(1), reverse=False)
resultssorted = [item for item, null in resultssorted]
resultssorted[:5]
resultssorted[-5:]
"""
Explanation: The scores in this object are an ordered list, with all HpaII scores first, then all BfaI scores and finally all ScrFI scores. We are interested in the distribution of scores along the DNA fragment, irrespective of the enzyme used to generate them. Thus, we want to rearrange the list with all scores from 5' to 3'.
End of explanation
"""
scores = [score for score, details in resultssorted]
def plot_score_histogram(scores):
'''
Input is a list of scores only (as ints)
'''
path = '/Library/Fonts/Microsoft/Arial.ttf'
prop = matplotlib.font_manager.FontProperties(fname=path)
matplotlib.rcParams['font.family'] = prop.get_name()
bins = range(0,106,5)
figure()
hist(scores, bins, color="gray")
tick_params(axis=u'both', labelsize=18)
#savefig('Scaffold score distribution.pdf', format="pdf")
plot_score_histogram(scores)
"""
Explanation: Let's extract the scores and plot their distribution on a histogram.
End of explanation
"""
def find_clusters_by_cutoff(resultssorted, x):
starts=[]
ends=[]
previtemgood = 0
for index, (score, details) in enumerate(resultssorted):
if score >= x and previtemgood ==0 and len(details) >= 20: #this avoids guides that are shorter than 20 bp (from where an enzyme cuts twice in close proximity)
starts.append((index, score, int(details.name)))
previtemgood = 1
elif score >= x and previtemgood == 1 and len(details) >=20:
None
elif previtemgood == 1:
previtemgood =0
ends.append((index-1, resultssorted[index-1][0], int(resultssorted[index-1][1].name)))
run_positions = zip(starts, ends)
goodruns_length = sorted([end - start for (start, i, j), (end,l,m) in run_positions], reverse=True)
return (goodruns_length, run_positions)
threshold = range(0, 105, 5)
probeyield = []
for item in threshold:
probeyield.append((item, sum(find_clusters_by_cutoff(resultssorted, item)[0][0:143])))
print(probeyield)
%pylab inline
figure()
plot([b for b, c in probeyield], [c for b, c in probeyield], "o")
"""
Explanation: So, there are ~5000 guides that are quite non-specific (score <= 4) and >14,000 guides that have a score of 100 and a further 4000 that score between 95 and 99.
Finding clusters of high-scoring guides
To make a library of useful guides, we'd like to PCR through continuous clusters of the highest-scoring ones. Our oligonucleotide vendor (IDT) has a minimum order of 288 oligos (3x 96-well plates) on a small and relatively inexpensive scale (5 pmol). To work within this limitation, we'd like to pick out 144 possible regions to PCR-amplify.
If we are only willing to accept guides with a score of 100, we'd predict that our 144 PCR products will be short (there are probably few long spans of perfect-scoring guides). However, if we relax our requirement to >=99, we may get longer PCR products and thus more guides in our library. How does this scale at different cutoffs/thresholds?
End of explanation
"""
threshold = 95
runs = find_clusters_by_cutoff(resultssorted, threshold)[1]
#(countofguides, (startguidenumberfrom5', startscore, startpositionbp), (endguidenumberfrom5', endscore, endpositionbp))
goodruns = sorted([((i_end - i), (i, s, pos), (i_end, s_end, pos_end)) for (i, s, pos), (i_end, s_end, pos_end) in runs], reverse=True)
"""
Explanation: Our "yield" of guides descends steadily from a cutoff of >=5 to a cutoff of >=95, then drops from 2894 guides produced at a cutoff of 95 to 1719 at 100. So, a cutoff of >=95 balances specificity and yield.
End of explanation
"""
probeyield = []
x = 95
fraction = 7.0
overlap = 2.0
region_to_extract = len(resultssorted)/fraction
for i in [float(item)/overlap for item in range(int(overlap*fraction+2.0))]:
goodruns = find_clusters_by_cutoff(resultssorted[int(region_to_extract*i):int(region_to_extract*(i+1))], x)[0]
probeyield.append((i, int(region_to_extract*i), sum(goodruns[0:143])))
if sum(goodruns[0:143]) == 0:
break
probeyield
"""
Explanation: We next asked what happens if we concentrate the guides into a smaller region. To test this, we cut the input DNA into sections of 1/7 the ~21MB starting length and asked how many guides would be obtained if 144 PCR products were designed within each of those subregions.
End of explanation
"""
#Modify resultssorted to only include the 3.4MB region used. (18121076 to (21505465+786) = 21506251)
resultssorted = [item for item in resultssorted if int(item[1].name) >= 18121076 and int(item[1].name) <= 21506251]
scores = [score for score, details in resultssorted]
"""
Explanation: The final 1/7 of the scaffold has the densest guide yield.
End of explanation
"""
# Set up the input for primer3:
# Sequence available to PCR:
guide_count = []
amps_in_3MB = []
for index, item in enumerate(goodruns[0:400]):
left_outside = item[0][0].id[-1]
left_inside = item[2][0][1].id[-1]
if left_outside == "F" and left_inside == "R":
permissible_start = int(item[0][0].name) + 10
required_start_absolute = int(item[2][0][1].name) +14
elif left_outside == "R" and left_inside == "R":
permissible_start = int(item[0][0].name) + 1
required_start_absolute = int(item[2][0][1].name) +14
elif left_outside == "R" and left_inside == "F":
permissible_start = int(item[0][0].name) + 1
required_start_absolute = int(item[2][0][1].name) +18
elif left_outside == "F" and left_inside == "F":
permissible_start = int(item[0][0].name) + 10
required_start_absolute = int(item[2][0][1].name) +18
else:
print("error on left")
right_inside = item[2][-1][1].id[-1]
right_outside = item[0][1].id[-1]
if right_outside == "F" and right_inside == "R":
permissible_end = int(item[0][1].name) + 19
required_end_absolute = int(item[2][-1][1].name) + 2
elif right_outside == "R" and right_inside == "F":
permissible_end = int(item[0][1].name) + 10
required_end_absolute = int(item[2][-1][1].name) + 8
elif right_outside == "R" and right_inside == "R":
permissible_end = int(item[0][1].name) + 10
required_end_absolute = int(item[2][-1][1].name) + 2
elif right_outside == "F" and right_inside == "F":
permissible_end = int(item[0][1].name) + 19
required_end_absolute = int(item[2][-1][1].name) + 8
else:
print("error on right")
amp = longscaffold[0][permissible_start:permissible_end]
# Bounds that need to be included in PCR product :
required_start_relative = required_start_absolute-permissible_start
required_end_relative = required_end_absolute - permissible_start
amp.dbxrefs=((required_start_relative, required_end_relative))
# Set up some other stuff:
amp.name =str(item[0][0].name)
amp.id =str(item[0][0].name)
amp.description=str(item[1])
amp.seq.alphabet = IUPACAmbiguousDNA()
if "NNNNN" in amp.seq: # Exclude if it has runs of Ns
None
#print amp.name + " contains ns " + str(item[1])
else:
amps_in_3MB.append(amp)
guide_count.append(item[1])
amps_in_3MB_gen = (i for i in amps_in_3MB)
print sum(guide_count[0:144])
with open("primerlist.txt", "w") as primerlist:
primerlist.write("Sequence_id\tforward_seq\tforward_start\tforward_length\tforward_tm\tforward_gc\treverse_seq\treverse_start\treverse_length\treverse_tm\treverse_gc\tinput_seq_length\tPCR_product_length\tGuides_Contained\n")
primerlist.close()
for item in amps_in_3MB:
current_amp = item
primerdict = al_primersearch(current_amp)
al_collect_good_primers(item, primerdict)
"""
Explanation: Designing primers
We want to make sure we design primers to make PCR products spanning as many guides as possible. The challenge is making sure we prime in a way that covers all of the high-specificity guides and none of the adjacent low specificity guides. If a "good" and a "bad" guide are very close together, the constraints on where primers can be designed against are tight.
End of explanation
"""
|
Jackie789/JupyterNotebooks
|
3.KNN_Classifiers.ipynb
|
gpl-3.0
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

music = pd.DataFrame()
# Some data to play with.
music['duration'] = [184, 134, 243, 186, 122, 197, 294, 382, 102, 264,
205, 110, 307, 110, 397, 153, 190, 192, 210, 403,
164, 198, 204, 253, 234, 190, 182, 401, 376, 102]
music['loudness'] = [18, 34, 43, 36, 22, 9, 29, 22, 10, 24,
20, 10, 17, 51, 7, 13, 19, 12, 21, 22,
16, 18, 4, 23, 34, 19, 14, 11, 37, 42]
# We know whether the songs in our training data are jazz or not.
music['jazz'] = [ 1, 0, 0, 0, 1, 1, 0, 1, 1, 0,
0, 1, 1, 0, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0]
# Look at our data.
plt.scatter(
music[music['jazz'] == 1].duration,
music[music['jazz'] == 1].loudness,
color='red'
)
plt.scatter(
music[music['jazz'] == 0].duration,
music[music['jazz'] == 0].loudness,
color='blue'
)
plt.legend(['Jazz', 'Rock'])
plt.title('Jazz and Rock Characteristics')
plt.xlabel('Duration')
plt.ylabel('Loudness')
plt.show()
"""
Explanation: K Nearest Neighbors Classifiers
So far we've covered learning via probability (naive Bayes) and learning via errors (regression). Here we'll cover learning via similarity. This means we look for the datapoints that are most similar to the observation we are trying to predict.
Let's start with the simplest example: Nearest Neighbor.
Nearest Neighbor
Let's use this example: classifying a song as either "rock" or "jazz". For this data we have measures of duration in seconds and loudness in loudness units (we're not going to be using decibels since that isn't a linear measure, which would create some problems we'll get into later).
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
neighbors = KNeighborsClassifier(n_neighbors=1)
X = music[['loudness', 'duration']]
Y = music.jazz
neighbors.fit(X,Y)
## Predict for a song with 24 loudness that's 190 seconds long.
neighbors.predict([[24, 190]])
"""
Explanation: The simplest form of a similarity model is the Nearest Neighbor model. This works quite simply: when trying to predict an observation, we find the closest (or nearest) known observation in our training data and use that value to make our prediction. Here we'll use the model as a classifier, the outcome of interest will be a category.
To find which observation is "nearest" we need some kind of way to measure distance. Typically we use Euclidean distance, the standard distance measure that you're familiar with from geometry. With one observation in n-dimensions $(x_1, x_2, ...,x_n)$ and the other $(w_1, w_2,...,w_n)$:
$$ \sqrt{(x_1-w_1)^2 + (x_2-w_2)^2+...+(x_n-w_n)^2} $$
You might recognize this formula, (taking distances, squaring them, adding the squares together, and taking the root) as a generalization of the Pythagorean theorem into n-dimensions. You can technically define any distance measure you want, and there are times where this customization may be valuable. As a general standard, however, we'll use Euclidean distance.
Now that we have a distance measure from each point in our training data to the point we're trying to predict, the model can find the datapoint with the smallest distance and then apply that category to our prediction.
Let's try running this model, using the SKLearn package.
End of explanation
"""
neighbors = KNeighborsClassifier(n_neighbors=5)
X = music[['loudness', 'duration']]
Y = music.jazz
neighbors.fit(X,Y)
## Predict for a 24 loudness, 190 seconds long song.
print(neighbors.predict([[24, 190]]))
print(neighbors.predict_proba([[24, 190]]))
"""
Explanation: It's as simple as that. Looks like our model is predicting that 24 loudness, 190 second long song is not jazz. All it takes to train the model is a dataframe of independent variables and a dataframe of dependent outcomes.
You'll note that for this example, we used the KNeighborsClassifier method from SKLearn. This is because Nearest Neighbor is a simplification of K-Nearest Neighbors. The jump, however, isn't that far.
K-Nearest Neighbors
K-Nearest Neighbors (or "KNN") is the logical extension of Nearest Neighbor. Instead of looking at just the single nearest datapoint to predict an outcome, we look at several of the nearest neighbors, with $k$ representing the number of neighbors we choose to look at. Each of the $k$ neighbors gets to vote on what the predicted outcome should be.
This does a couple of valuable things. Firstly, it smooths out the predictions. If only one neighbor gets to influence the outcome, the model explicitly overfits to the training data. Any single outlier can create pockets of one category prediction surrounded by a sea of the other category.
This also means instead of just predicting classes, we get implicit probabilities. If each of the $k$ neighbors gets a vote on the outcome, then the probability of the test example being from any given class $i$ is:
$$ \frac{votes_i}{k} $$
And this applies for all classes present in the training set. Our example only has two classes, but this model can accommodate as many classes as the data set necessitates. To come up with a classifier prediction it simply takes the class for which that fraction is maximized.
Let's expand our initial nearest neighbors model from above to a KNN with a $k$ of 5.
End of explanation
"""
# Our data. Converting from data frames to arrays for the mesh.
X = np.array(X)
Y = np.array(Y)
# Mesh size.
h = 4.0
# Plot the decision boundary. We assign a color to each point in the mesh.
x_min = X[:, 0].min() - .5
x_max = X[:, 0].max() + .5
y_min = X[:, 1].min() - .5
y_max = X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h)
)
Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot.
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(6, 4))
plt.set_cmap(plt.cm.Paired)
plt.pcolormesh(xx, yy, Z)
# Add the training points to the plot.
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.xlabel('Loudness')
plt.ylabel('Duration')
plt.title('Mesh visualization')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
"""
Explanation: Now our test prediction has changed. In using the five nearest neighbors it appears that there were two votes for rock and three for jazz, so it was classified as a jazz song. This is different than our simpler Nearest Neighbors model. While the closest observation was in fact rock, there are more jazz songs in the nearest $k$ neighbors than rock.
We can visualize our decision bounds with something called a mesh. This allows us to generate a prediction over the whole space. Read the code below and make sure you can pull out what the individual lines do, consulting the documentation for unfamiliar methods if necessary.
End of explanation
"""
# Play with different mesh sizes here.
# Our data. Converting from data frames to arrays for the mesh.
X = np.array(X)
Y = np.array(Y)
# Mesh size.
h = 0.5
# Plot the decision boundary. We assign a color to each point in the mesh.
x_min = X[:, 0].min() - .5
x_max = X[:, 0].max() + .5
y_min = X[:, 1].min() - .5
y_max = X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h)
)
Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot.
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(6, 4))
plt.set_cmap(plt.cm.Paired)
plt.pcolormesh(xx, yy, Z)
# Add the training points to the plot.
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.xlabel('Loudness')
plt.ylabel('Duration')
plt.title('Mesh visualization')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.show()
"""
Explanation: Looking at the visualization above, any new point that fell within a blue area would be predicted to be jazz, and any point that fell within a brown area would be predicted to be rock.
The boundaries above are strangely jagged here, and we'll get into that in more detail in the next lesson.
Also note that the visualization isn't completely continuous. There are an infinite number of points in this space, and we can't calculate the value for each one. That's where the mesh comes in. We set our mesh size (h = 4.0) to 4.0 above, which means we calculate the value for each point in a grid where the points are spaced 4.0 away from each other.
You can make the mesh size smaller to get a more continuous visualization, but at the cost of a more computationally demanding calculation. In the cell below, recreate the plot above with a mesh size of 10.0. Then reduce the mesh size until you get a plot that looks good but still renders in a reasonable amount of time. When do you get a visualization that looks acceptably continuous? When do you start to get a noticeable delay?
End of explanation
"""
from heapq import nsmallest
#first, find the nearest neighbors
def nearest_neighbors (k, currentPoint):
predictionSet = list()
    # identify the k nearest neighbors
distances = list()
for x in X:
distance = np.sqrt((x[0]-currentPoint[0])**2 + (x[1]-currentPoint[1])**2)
distances.append(distance)
# Choose the k smallest distances
kneighbor_distances = nsmallest(k, distances)
for i in range(k):
this_neighbor = distances.index(kneighbor_distances[i])
predictionSet.append(Y[this_neighbor])
    # identify the ratio of the target class within the k neighbors
predictionProb = sum(predictionSet)/len(predictionSet)
    # identify the highest-probability prediction
if predictionProb >= 0.50:
return 1
elif predictionProb < 0.50:
return 0
#Run the Code! Try your own parameters
nearest_neighbors(7, [30,90])
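# Sanity check against SKLearn with the same k (illustrative; the two will normally agree,
# though ties and duplicate distances can be broken differently).
sk_check = KNeighborsClassifier(n_neighbors=7)
sk_check.fit(X, Y)
print(sk_check.predict([[30, 90]]))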
"""
Explanation: Now you've built a KNN model!
Challenge: Implement the Nearest Neighbor algorithm
The Nearest Neighbor algorithm is extremely simple. So simple, in fact, that you should be able to build it yourself from scratch using the Python you already know. Code a Nearest Neighbors algorithm that works for two dimensional data. You can use either arrays or dataframes to do this. Test it against the SKLearn package on the music dataset from above to ensure that it's correct. The goal here is to confirm your understanding of the model and continue to practice your Python skills. We're just expecting a brute force method here. After doing this, look up "ball tree" methods to see a more performant algorithm design.
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
dev/_downloads/64e3b6395952064c08d4ff33d6236ff3/evoked_whitening.ipynb
|
bsd-3-clause
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import mne
from mne import io
from mne.datasets import sample
from mne.cov import compute_covariance
print(__doc__)
"""
Explanation: Whitening evoked data with a noise covariance
Evoked data are loaded and then whitened using a given noise covariance
matrix. It's an excellent quality check to see if baseline signals match
the assumption of Gaussian white noise during the baseline period.
Covariance estimation and diagnostic plots are based on
:footcite:EngemannGramfort2015.
References
.. footbibliography::
End of explanation
"""
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 40, fir_design='firwin')
raw.info['bads'] += ['MEG 2443'] # bads + 1 more
events = mne.read_events(event_fname)
# let's look at rare events, button presses
event_id, tmin, tmax = 2, -0.2, 0.5
reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),
baseline=None, reject=reject, preload=True)
# Uncomment next line to use fewer samples and study regularization effects
# epochs = epochs[:20] # For your data, use as many samples as you can!
"""
Explanation: Set parameters
End of explanation
"""
method_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))
noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
return_estimators=True, n_jobs=None,
projs=None, rank=None,
method_params=method_params, verbose=True)
# With "return_estimators=True" all estimated covariances, sorted
# by log-likelihood, are returned.
print('Covariance estimates sorted from best to worst')
for c in noise_covs:
print("%s : %s" % (c['method'], c['loglik']))
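# Since the estimates are sorted from best to worst, a simple follow-up
# (a sketch added here, not part of the original tutorial) is to keep the
# top-ranked covariance for later use:
best_cov = noise_covs[0]
print('Best estimator:', best_cov['method'])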
"""
Explanation: Compute covariance using automated regularization
End of explanation
"""
evoked = epochs.average()
evoked.plot(time_unit='s') # plot evoked response
"""
Explanation: Show the evoked data:
End of explanation
"""
evoked.plot_white(noise_covs, time_unit='s')
"""
Explanation: We can then show whitening for our various noise covariance estimates.
Here we should look to see if baseline signals match the
assumption of Gaussian white noise. We expect values centered at
0 within 2 standard deviations for 95% of the time points.
For the global field power (GFP) we expect a value of 1.
End of explanation
"""
|
tzoiker/gensim
|
docs/notebooks/doc2vec-lee.ipynb
|
lgpl-2.1
|
import gensim
import os
import collections
import random
"""
Explanation: Doc2Vec Tutorial on the Lee Dataset
End of explanation
"""
# Set file names for train and test data
test_data_dir = '{}'.format(os.sep).join([gensim.__path__[0], 'test', 'test_data'])
lee_train_file = test_data_dir + os.sep + 'lee_background.cor'
lee_test_file = test_data_dir + os.sep + 'lee.cor'
"""
Explanation: What is it?
Doc2Vec is an NLP tool for representing documents as vectors and is a generalization of the Word2Vec method. This tutorial will serve as an introduction to Doc2Vec and present ways to train and assess a Doc2Vec model.
Resources
Word2Vec Paper
Doc2Vec Paper
Dr. Michael D. Lee's Website
Lee Corpus
IMDB Doc2Vec Tutorial
Getting Started
To get going, we'll need to have a set of documents to train our doc2vec model. In theory, a document could be anything from a short 140 character tweet, a single paragraph (i.e., journal article abstract), a news article, or a book. In NLP parlance a collection or set of documents is often referred to as a <b>corpus</b>.
For this tutorial, we'll be training our model using the Lee Background Corpus included in gensim. This corpus contains 314 documents selected from the Australian Broadcasting
Corporation’s news mail service, which provides text e-mails of headline stories and covers a number of broad topics.
And we'll test our model by eye using the much shorter Lee Corpus which contains 50 documents.
End of explanation
"""
def read_corpus(fname, tokens_only=False):
with open(fname, encoding="iso-8859-1") as f:
for i, line in enumerate(f):
if tokens_only:
yield gensim.utils.simple_preprocess(line)
else:
# For training data, add tags
yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [i])
train_corpus = list(read_corpus(lee_train_file))
test_corpus = list(read_corpus(lee_test_file, tokens_only=True))
"""
Explanation: Define a Function to Read and Preprocess Text
Below, we define a function to open the train/test file (with latin encoding), read the file line-by-line, pre-process each line using a simple gensim pre-processing tool (i.e., tokenize text into individual words, remove punctuation, set to lowercase, etc), and return a list of words. Note that, for a given file (aka corpus), each continuous line constitutes a single document and the length of each line (i.e., document) can vary. Also, to train the model, we'll need to associate a tag/number with each document of the training corpus. In our case, the tag is simply the zero-based line number.
End of explanation
"""
train_corpus[:2]
"""
Explanation: Let's take a look at the training corpus
End of explanation
"""
print(test_corpus[:2])
"""
Explanation: And the testing corpus looks like this:
End of explanation
"""
model = gensim.models.doc2vec.Doc2Vec(size=50, min_count=2, iter=10)
"""
Explanation: Notice that the testing corpus is just a list of lists and does not contain any tags.
Training the Model
Instantiate a Doc2Vec Object
Now, we'll instantiate a Doc2Vec model with a vector size of 50 dimensions and iterate over the training corpus 10 times. We set the minimum word count to 2 in order to discard words that occur only once. Model accuracy can be improved by increasing the number of iterations, but this generally increases the training time.
End of explanation
"""
model.build_vocab(train_corpus)
"""
Explanation: Build a Vocabulary
End of explanation
"""
%time model.train(train_corpus)
"""
Explanation: Essentially, the vocabulary is a dictionary (accessible via model.vocab) of all of the unique words extracted from the training corpus along with the count (e.g., model.vocab['penalty'].count for counts for the word penalty).
Time to Train
If the BLAS library is being used, this should take no more than 2 seconds.
If the BLAS library is not being used, this should take no more than 2 minutes, so use BLAS if you value your time.
End of explanation
"""
model.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])
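# A small added sketch showing how two inferred vectors can be compared with
# cosine similarity using plain numpy (the second word list is made up):
import numpy as np
v1 = model.infer_vector(['only', 'you', 'can', 'prevent', 'forrest', 'fires'])
v2 = model.infer_vector(['forrest', 'fire', 'prevention'])
print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))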
"""
Explanation: Inferring a Vector
One important thing to note is that you can now infer a vector for any piece of text without having to re-train the model by passing a list of words to the model.infer_vector function. This vector can then be compared with other vectors via cosine similarity.
End of explanation
"""
ranks = []
second_ranks = []
for doc_id in range(len(train_corpus)):
inferred_vector = model.infer_vector(train_corpus[doc_id].words)
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
rank = [docid for docid, sim in sims].index(doc_id)
ranks.append(rank)
second_ranks.append(sims[1])
"""
Explanation: Assessing Model
To assess our new model, we'll first infer new vectors for each document of the training corpus, compare the inferred vectors with the training corpus, and then return the rank of each document based on self-similarity. Basically, we're pretending that the training corpus is new, unseen data and seeing how it compares with the trained model. The expectation is that we've likely overfit our model (i.e., all of the ranks will be less than 2), so we should be able to find similar documents very easily. Additionally, we'll keep track of the second-ranked documents for a comparison of less similar documents.
End of explanation
"""
collections.Counter(ranks) #96% accuracy
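# Equivalently (added for clarity), the fraction of documents that are most
# similar to themselves can be computed directly from the ranks list:
print(ranks.count(0) / float(len(ranks)))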
"""
Explanation: Let's count how each document ranks with respect to the training corpus
End of explanation
"""
print('Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
"""
Explanation: Basically, greater than 95% of the inferred documents are found to be most similar to themselves, and about 5% of the time a document is mistakenly most similar to another document. This is great and not entirely surprising. We can take a look at an example:
End of explanation
"""
# Pick a random document from the train corpus
doc_id = random.randint(0, len(train_corpus) - 1)
# Compare and print the most/median/least similar documents from the train corpus
print('Train Document ({}): «{}»\n'.format(doc_id, ' '.join(train_corpus[doc_id].words)))
sim_id = second_ranks[doc_id]
print('Similar Document {}: «{}»\n'.format(sim_id, ' '.join(train_corpus[sim_id[0]].words)))
"""
Explanation: Notice above that the most similar document has a similarity score of ~80% (or higher). However, the similarity score for the second-ranked document should be significantly lower (assuming the documents are in fact different), and the reasoning becomes obvious when we examine the text itself
End of explanation
"""
# Pick a random document from the test corpus and infer a vector from the model
doc_id = random.randint(0, len(test_corpus) - 1)
inferred_vector = model.infer_vector(test_corpus[doc_id])
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
# Compare and print the most/median/least similar documents from the train corpus
print('Test Document ({}): «{}»\n'.format(doc_id, ' '.join(test_corpus[doc_id])))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(train_corpus[sims[index][0]].words)))
"""
Explanation: Testing the Model
Using the same approach above, we'll infer the vector for a randomly chosen test document, and compare the document to our model by eye.
End of explanation
"""
|
fonnesbeck/scientific-python-workshop
|
notebooks/Plotting and Visualization.ipynb
|
cc0-1.0
|
import numpy as np
import pandas as pd
import matplotlib as mpl # used sparingly
import matplotlib.pyplot as plt
pd.set_option("notebook_repr_html", False)
pd.set_option("max_rows", 10)
"""
Explanation: Plotting and Visualization
End of explanation
"""
%matplotlib inline
"""
Explanation: Landscape of Plotting Libraries
matplotlib
pandas
seaborn
mpld3
"Bringing matplotlib to the browser"
d3py
"a plotting library for python based on d3."
mayavi
"seeks to provide easy and interactive visualization of 3D data."
ggplot
"Yes, it's another port of ggplot2."
bokeh
"Bokeh is a Python interactive visualization library that targets modern web browsers for presentation."
mpl_toolkits
basemap
mplot3d
Matplotlib Orientation
Introduction
Matplotlib is the de facto standard for plotting in Python
Understanding matplotlib is key to unlocking its power
Online Documentation
Project web site is here
Examples online and also in the source code
API documentation is here
The FAQ may answer some of your questions
Get to know the gallery
Getting Help
First stop should be the gallery
Next stop could be stackoverflow
There's also a mailing list
This notebook draws heavily from the following sources [E.g., 1, 2, 3]
Notebook specifics
End of explanation
"""
from matplotlib import matplotlib_fname
matplotlib_fname()
"""
Explanation: Backends
Potential uses of matplotlib
interactively from python shell/IPython
Embed in a GUI
Generate postscript images in batch scripts
In a web application to serve graphs
Each of these use cases is enabled by using a backend
Two types
User interface / Interactive (for use in pygtk, wxpython, tkinter, qt4, or macosx)
Hard copy / Non-interactive (PNG, SVG, PDF, PS)
Set your backend in your matplotlibrc
Or with the use function (before importing pyplot
python
from matplotlib import use
use('PS') # postscript
Configuration
See Customizing Matplotlib for more information
You can edit your matplotlibrc to change the matplotlib defaults
End of explanation
"""
from matplotlib import rcParams
rcParams.keys()
rcParams['font.family']
rcParams['font.family'] = 'monospace'
rcParams['font.family']
rcParams['font.family'] = 'sans-serif'
"""
Explanation: This has a popular one
Take a look at the Seaborn project. More on that later.
You can also change them dynamically using the global rcParams object
End of explanation
"""
from matplotlib import rc_context
with rc_context({'font.family': 'monospace'}):
print(rcParams['font.family'])
print(rcParams['font.family'])
"""
Explanation: You can also use the rc_context context manager
End of explanation
"""
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4])
plt.title("Title")
plt.xlabel("X")
"""
Explanation: Interactive Plotting with PyPlot
Interative backends allow plotting to the screen
Interactive mode plots to the screen without calls to show
Interactive mode does not require using pyplot
Doing the following at the interpreter will show a plot
python
import matplotlib.pyplot as plt
plt.ion()
plt.plot([1, 2, 3, 4, 5])
plt.title("Title")
At IPython interpreter enable interactive with (or set it in matplotlibrc
python
import matplotlib.pyplot as plt
plt.ion()
or with
from matplotlib import interactive
interactive()
End of explanation
"""
fig, ax = plt.subplots()
ax.plot([1, 2, 3, 4, 5])
ax.set_title("Title")
plt.draw_if_interactive()
"""
Explanation: If using object method calls, you must call draw or draw_if_interactive to see changes
Again, this is unnecessary in the notebook
End of explanation
"""
plt.plot([1, 5, 3])
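# For comparison (added sketch): the same y values with explicit x values
plt.plot([10, 20, 30], [1, 5, 3])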
"""
Explanation: By default the plot method takes x values, then y values
If only one sequence of values is passed, it is treated as the y values and its indices are used as the x values
End of explanation
"""
plt.Figure?
fig = plt.Figure()
"""
Explanation: What is the pyplot namespace?
It's where everything comes together
Usually where you want to start
Broadly, 3 categories of functions
Plotting preparation
Plotting functions
Plot modifiers
Plotting Preparation
Function | Description
:-----------------|:----------------------------------------------------------
autoscale | Autoscale the axis view to the data (toggle).
axes | Add an axes to the figure.
axis | Convenience method to get or set axis properties.
cla | Clear the current axes.
clf | Clear the current figure.
clim | Set the color limits of the current image.
delaxes | Remove an axes from the current figure.
locator_params | Control behavior of tick locators.
margins | Set or retrieve autoscaling margins.
figure | Creates a new figure.
gca | Return the current axis instance.
gcf | Return a reference to the current figure.
gci | Get the current colorable artist.
hold | Set the hold state.
ioff | Turn interactive mode off.
ion | Turn interactive mode on.
ishold | Return the hold status of the current axes.
isinteractive | Return status of interactive mode.
rc | Set the current rc params.
rc_context | Return a context manager for managing rc settings.
rcdefaults | Restore the default rc params.
savefig | Save the current figure.
sca | Set the current Axes instance.
sci | Set the current image.
set_cmap | Set the default colormap
setp | Set a property on an artist object
show | Display a figure
subplot | Return a subplot axes positioned by the given grid definition.
subplot2grid | Create a subplot in a grid.
subplot_tool | Launch a subplot tool window for a figure.
subplots | Create a figure with a set of subplots already made.
subplots_adjust | Tune the subplot layout.
switch_backend | Switch the default backend.
tick_params | Change the appearance of ticks and tick labels.
ticklabel_format| Change the ScalarFormatter used by default for linear axes.
tight_layout | Automatically adjust subplot parameters to give specified padding.
xkcd | Turns on XKCD sketch-style drawing mode.
xlabel | Set the x axis label of the current axis.
xlim | Get or set the x limits of the current axes.
xscale | Set the scaling of the x-axis.
xticks | Get or set the x-limits of the current tick locations and labels.
ylabel | Set the y axis label of the current axis.
ylim | Get or set the y-limits of the current axes.
yscale | Set the scaling of the y-axis.
yticks | Get or set the y-limits of the current tick locations and labels.
Plotting Functions
Function | Description
:-----------------|:--------------------------------------------
acorr | Plot the autocorrelation of x
bar | Make a bar plot
barbs | Plot a 2-D field of barbs
barh | Make a horizontal bar plot
boxplot | Make a box and whisker plot
broken_barh | Plot horizontal bars
cohere | Plot the coherence between x and y
contour | Plot contours
contourf | Plot filled contours
csd | Plot cross-spectral density
errorbar | Plot an errorbar graph
eventplot | Plot identical parallel lines at specific positions
fill | Plot filled polygons
fill_between | Make filled polygons between two curves
fill_betweenx | Make filled polygons between two horizontal curves
hexbin | Make a hexagonal binning plot
hist | Plot a histogram
hist2d | Make a 2D histogram plot
imshow | Display an image on the axes
loglog | Make a plot with log scaling on both the x and y axis
matshow | Display an array as a matrix in a new figure window
pcolor | Create a pseudocolor plot of a 2-D array
pcolormesh | Plot a quadrilateral mesh
pie | Plot a pie chart
plot | Plot lines and/or markers
plot_date | Plot with data with dates
polar | Make a polar plot
psd | Plot the power spectral density
quiver | Plot a 2-D field of arrows
scatter | Make a scatter plot of x vs y
semilogx | Make a plot with log scaling on the x axis
semilogy | Make a plot with log scaling on the y axis
specgram | Plot a spectrogram
spy | Plot the sparsity pattern on a 2-D array
stackplot | Draws a stacked area plot
stem | Create a stem plot
step | Make a step plot
streamplot | Draws streamlines of a vector flow
tricontour | Draw contours on an unstructured triangular grid
tricontourf | Draw filled contours on an unstructured triangular grid
tripcolor | Create a pseudocolor plot of an unstructured triangular grid
triplot | Draw an unstructured triangular grid as lines and/or markers
xcorr | Plot the cross-correlation between x and y
Plot modifiers
Function | Description
:-----------------|:---------------------------------------------------------------------
annotate | Create an annotation: a piece of text referring to a data point
arrow | Add an arrow to the axes
axhline | Add a horizontal line across the axis
axhspan | Add a horizontal span (rectangle) across the axis
axvline | Add a vertical line across the axes
axvspan | Add a vertical span (rectangle) across the axes
box | Turn the axes box on or off
clabel | Label a contour plot
colorbar | Add a colorbar to a plot
grid | Turn the axes grids on or off
hlines | Plot horizontal lines
legend | Place a legend on the current axes
minorticks_off | Remove minor ticks from the current plot
minorticks_on | Display minor ticks on the current plot
quiverkey | Add a key to a quiver plot
rgrids | Get or set the radial gridlines on a polar plot
suptitle | Add a centered title to the figure
table | Add a table to the current axes
text | Add text to the axes
title | Set a title of the current axes
vlines | Plot vertical lines
xlabel | Set the x axis label of the current axis
ylabel | Set the y axis label of the current axis
Figures
The Figure is the central object of matplotlib
It is the GUI window that contains the plot
End of explanation
"""
plt.close()
"""
Explanation: Close the last made Figure, by default
End of explanation
"""
fig = plt.figure(figsize=(5, 5))
"""
Explanation: You can also refer to figures by their number starting at 1
plt.close('all') is handy
One of the most commonly used options when creating a Figure is figsize, a tuple specifying the width and height in inches
End of explanation
"""
fig = plt.figure()
ax = fig.add_subplot(111)
lines = ax.plot([1, 2, 3])
text = ax.set_xlabel("X")
"""
Explanation: Axes
The Axes object is contained within and belongs to a figure
This is where the plotting happens
You will interact with the Axes most often
Use the add_subplot method to put an axes on a figure
It takes the shorthand for n_rows, n_cols, plot_number
End of explanation
"""
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(121)
ax1.plot([1, 2, 3])
ax2 = fig.add_subplot(122)
ax2.plot([3, 2, 1])
"""
Explanation: You may have guessed that you can have more than one axes on a plot
End of explanation
"""
plt.xlabel??
"""
Explanation: Library Plotting
You'll notice above that I stopped using plt for almost everything but figure creation
This is usually how I use matplotlib and allows the most flexible, powerful usage
In fact, most functions in the pyplot namespace call gca to get the current axes and then delegate to the corresponding method of the Axes object
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(np.random.randn(20), np.random.randn(20))
"""
Explanation: You'll also notice that I assign the returns from the matplotlib object method calls to variables
This is a good habit to get in and we will see why below
One last handy function is plt.subplots
It's almost all I ever use from the plt namespace with a few exceptions
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(np.random.randn(20), np.random.randn(20))
ax.scatter(np.random.randn(20), np.random.randn(20), color='r')
fig
"""
Explanation: Notebook aside
You can work on figures across cells. Just make the existing figure object the last line in the cell.
End of explanation
"""
plt.plot?
"""
Explanation: Exercise
Let's make some basic plots. Make a scatter plot as above with 500 points. Draw random numbers from 0 to 100 for the y axis and set the limits of the y axis at 0 and 200.
Styling
End of explanation
"""
x = np.linspace(-2*np.pi, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
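# An added sketch combining a color, a linestyle, and a marker in one format
# string: the same sin curve (subsampled so the markers are visible) drawn as
# red dashes with circle markers.
plt.plot(x[::10], y[::10], 'r--o')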
"""
Explanation: Colors
Single letter shortcuts
b: blue
g: green
r: red
c: cyan
m: magenta
y: yellow
k: black
w: white
Shades of gray: a string of a float in the 0-1 range
color = '0.75'
HTML hex strings
color = '#eeefff'
R, G, B tuples with R, G, B in [0, 1]
HTML names for colors, like ‘red’, ‘burlywood’ and ‘chartreuse’
Markers
See here for the full list
A few commonly used ones are
".": point
",": pixel
"o": circle
"*": star
"+": plus
"x": x
"D”: diamond
Linestyles
'-' solid
'--' dashed
'-.' dash_dot
':' dotted
'None' draw nothing
' ' draw nothing
'' draw nothing
Exercises
Create a figure that holds two subplots in two rows. In the top one, plot a sin curve from $-2\pi$ to $2\pi$ in green. In the second one, plot a dashed red line (Hint: you may find np.linspace to be useful).
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot([1, 2, 4, 5], label="Line 1")
ax.plot([2, 5, 3, 4], label="Line 2")
legend = ax.legend(loc='best', fontsize=20)
"""
Explanation: Labels and Legends
You can label many things in matplotlib
Labeling lines allows automatic legend creation
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot([1, 2, 4, 5], label="Line 1")
ax.plot([2, 5, 3, 4], label="Line 2")
ax.set_xlabel("X", fontsize=20)
ax.set_ylabel("Y", fontsize=20)
legend = ax.legend(loc='best', fontsize=20)
"""
Explanation: You can label the X and Y axes
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot([1, 2, 4, 5], label="Line 1")
ax.plot([2, 5, 3, 4], label="Line 2")
ax.set_xlabel("X", fontsize=20)
ax.set_ylabel("Y", fontsize=20)
ax.set_title("Title", fontsize=20)
legend = ax.legend(loc='best', fontsize=20)
"""
Explanation: Label the axes with a title
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.grid(False)
ax.tick_params(axis='y', which='major', length=15, right=False)
ax.tick_params(axis='x', which='major', length=15, top=False, direction="out", pad=15)
"""
Explanation: Ticks and Tick Labels
The Ticks are the location of the Tick labels
The Tick lines denote the Ticks
The Tick labels are the text accompanying the tick
A Ticker determines the ticks and their labels automatically
You can use tick_params to adjust the appearance of the ticks
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.grid(False)
ax.tick_params(axis='y', which='major', length=15, right=False)
ax.tick_params(axis='x', which='major', length=15, top=False)
ticklabels = ax.xaxis.set_ticklabels(['aaaa', 'bbbb', 'cccc',
'dddd', 'eeee', 'ffff'],
rotation=45, fontsize=15)
"""
Explanation: You can set your own tick labels
End of explanation
"""
ax.spines
fig, ax = plt.subplots(figsize=(8, 8))
ax.tick_params(bottom=False, top=False, left=False, right=False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.grid(False)
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([]);
"""
Explanation: Spines
The spines are the boundaries of the axes, and they can be selectively turned off
End of explanation
"""
x, y = np.random.randn(2, 100)
x.sort()
fig, ax = plt.subplots()
ax.plot(y, 'g--')
fig, ax = plt.subplots()
ax.plot(x, y)
fig, ax = plt.subplots()
ax.plot(x, y, 'o')
x2, y2 = np.random.randn(2, 200)
x2.sort()
fig, ax = plt.subplots()
lines = ax.plot(x, y, 'o', x2, y2, 'ro', ms=8, alpha=.5)
"""
Explanation: More on plot
The plot function is a bit of a work horse with a flexible API
End of explanation
"""
y = pd.Series(np.random.randn(25))
y.plot()
y.cumsum().plot()
"""
Explanation: Plotting in Pandas vs Matplotlib
Pandas provides a few accessors that allow you to stay fairly high-level without giving up any of the power and flexibility of matplotlib
Series and DataFrames have a plot method
They take a kind keyword argument which accepts several values for plots other than the default line plot. These include:
bar or barh for bar plots
hist for histogram
box for boxplot
kde or 'density' for density plots
area for area plots
scatter for scatter plots
hexbin for hexagonal bin plots
pie for pie plots
End of explanation
"""
dta = pd.DataFrame({'normal': np.random.normal(size=100),
'gamma': np.random.gamma(1, size=100),
'poisson': np.random.poisson(size=100)})
ax = dta.cumsum(0).plot()
"""
Explanation: Notice that these return AxesSubplot objects, so we have our hook in to all of the powerful methods from matplotlib
So, too, do DataFrames
End of explanation
"""
ax = dta.cumsum(0).plot(subplots=True, figsize=(10, 10))
"""
Explanation: Exercise
Without re-plotting any of the above, re-size the fonts for the labels and the legend and display the figure.
Alternatively, we can plot the above in separate subplots
We can also change the figsize
End of explanation
"""
axes = dta.cumsum(0).plot(subplots=True, figsize=(10, 10))
fig = axes[0].figure
fig.tight_layout()
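# For finer-grained control than tight_layout, subplots_adjust can be called
# on the figure directly (added sketch; the spacing values below are arbitrary):
fig.subplots_adjust(hspace=0.4, wspace=0.3)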
"""
Explanation: These are just matplotlib objects
Note the use of tight_layout below
tight_layout automatically adjusts the subplot params so that the subplot fits the figure
You can have more fine-grained control using
python
fig.subplots_adjust
End of explanation
"""
axes = dta.cumsum().plot(secondary_y='normal')
"""
Explanation: We can easily add a secondary y-axis
End of explanation
"""
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
for i, ax in enumerate(axes):
variable = dta.columns[i]
ax = dta[variable].cumsum().plot(ax=ax)
ax.set_title(variable, fontsize=16)
axes[0].set_ylabel("Cumulative Sum", fontsize=14);
"""
Explanation: We can also ask pandas to plot on already existing axes
End of explanation
"""
dta = pd.read_csv("../data/weather_nyc.csv")
dta = dta.ix[dta.year < 2015] # truncate to end of year
"""
Explanation: Bar plots
Bar plots are useful for displaying and comparing measurable quantities, such as counts or volumes.
We can use the plot method with a kind='bar' argument.
Let's use temperature data from NYC 1995 - 2014
End of explanation
"""
dta.query("year < 2015")
"""
Explanation: Or equivalently
End of explanation
"""
bins = [dta.temp.min(), 32, 55, 80, dta.temp.max()]
bins
labels = ["freezing", "cold", "warm", "hot"]
dta["temp_bin"] = pd.cut(dta.temp, bins, labels=labels)
try:
from scipy.constants import F2C
except ImportError: # no scipy installed
def F2C(f):
return (np.array(f) - 32)/1.8
lmap = lambda func, x : list(map(func, x))
"""
Explanation: Recall that pandas.cut can be used to bin continuous data into buckets
End of explanation
"""
bins = [dta.tempc.min()] + lmap(F2C, (32, 55, 80)) + [dta.tempc.max()]
bins
labels = ["freezing", "cold", "warm", "hot"]
dta["tempc_bin"] = pd.cut(dta.tempc, bins, labels=labels)  # bin the Celsius column with the Celsius bins
dta.head()
ax = dta.groupby("temp_bin").size().plot(kind="bar")
"""
Explanation: Celsius bins
End of explanation
"""
ax = dta.groupby("temp_bin").size().plot(kind="bar", rot=0, fontsize=16, figsize=(8, 5))
ax.set_xlabel("Temperature")
ax.set_ylabel("Number of Days")
ax.set_title("Temperatures from 1995 - 2014");
"""
Explanation: What's wrong with this graph?
Axis labels and tick labels to start
Some things we can do through the plot method
Some things we have to do with matplotlib
Make the xticks labels bigger and rotate them
End of explanation
"""
dta.groupby(["season", "temp_bin"]).size().plot(kind="barh", figsize=(6, 8))
"""
Explanation: Horizontal bar chart
End of explanation
"""
ct = pd.crosstab(dta.temp_bin, dta.season)
ct
ax = ct.plot(kind="bar", stacked=True, figsize=(12, 8), grid=False,
legend=True)
"""
Explanation: Stacked bar chart
The pandas crosstab function creates a cross-tabulation of two or more factors.
End of explanation
"""
colors = plt.cm.Paired(np.linspace(0, 1, 4))
colors
ax = pd.crosstab(dta.temp_bin, dta.season).plot(kind="bar", stacked=True,
figsize=(12, 8), grid=False,
legend=True, colors=colors, rot=0,
fontsize=16)
# adjust the fontsize of the legend
legend = ax.get_legend()
for text in legend.get_texts():
text.set_fontsize(18)
legend.get_title().set_fontsize(20)
"""
Explanation: Matplotlib provides a variety of ColorMaps
The Paired colormap is a good qualitative colormap
End of explanation
"""
dta.temp.min()
ax = dta.temp.plot(kind="hist", bins=50)
"""
Explanation: Histograms
Frequently it is useful to look at the distribution of data before you analyze it.
Histograms display relative frequencies of data values
The y-axis is always some measure of frequency, raw counts of values or scaled proportions
End of explanation
"""
dta.ix[dta.temp == -99, ["temp", "tempc"]] = np.nan
"""
Explanation: It's even a good exercise here! Let's turn the -99 sentinel values into NaNs.
End of explanation
"""
ax = dta.temp.plot(kind="hist", bins=50, grid=False, figsize=(10, 6))
# plot a vertical line that spans the axis
line = ax.axvline(dta.temp.mean(), color='r', lw=3, label="Mean")
# specifically add a legend
handles, labels = ax.get_legend_handles_labels()
ax.legend([handles[0]], [labels[0]], fontsize=16)
handles
"""
Explanation: Incidentally, pandas will handle nulls in plotting
End of explanation
"""
def scotts_rule(x):
x = x.dropna()
std = x.std()
return 3.5 * std / (len(x)**(1./3))
def width_to_nbins(x, h):
x = x.dropna()
return int(round(x.ptp()/h))
h = scotts_rule(dta.temp)
nbins = width_to_nbins(dta.temp, h)
ax = dta.temp.plot(kind="hist", bins=nbins, grid=False, figsize=(10, 6))
# plot a vertical line that spans the axis
line = ax.axvline(dta.temp.mean(), color='r', lw=3, label="Mean")
"""
Explanation: Optimal number of bins
Scott's rule
$$h=\frac{3.5\sigma}{n^{1/3}}$$
End of explanation
"""
ax = dta.temp.plot(kind='kde', grid=False, figsize=(10, 6))
ax.set_xlim(0, 100)
"""
Explanation: Density Plots
Kernel Density Estimators are a kind of smoothed histogram (more on this later)
Pandas provides a hook to KDE plots using statsmodels, if installed, or scipy
End of explanation
"""
ax = dta.temp.plot(kind='kde', grid=False, figsize=(10, 6), color='r', lw=3)
ax = dta.temp.plot(kind="hist", bins=nbins, grid=False, figsize=(10, 6), ax=ax, normed=True, alpha=.7)
ax.set_xlim(0, 100)
"""
Explanation: We can compare the KDE to the normed histogram
End of explanation
"""
ax = dta.boxplot(column="temp", by="season", grid=False, figsize=(8, 10), fontsize=16,
whis=[5, 95])
ax.set_title(ax.get_title(), fontsize=20)
ax.xaxis.get_label().set_fontsize(18)
fig = ax.figure
# Change the size of the figure title
# http://stackoverflow.com/a/12449783/535665
fig.texts[0].set_fontsize(20)
# whitespace between axes and fig boundary
fig.subplots_adjust(top=.85)
"""
Explanation: Exercise
Create KDE estimates for the temperature in each season on a single plot. Label the plotted lines.
Box plots
Boxplots (aka "box and whisker" plots) are a different way to display distributions of data
The box contains the quartiles of the data
The "whiskers" are typically the lower and upper 5 percent values
In matplotlib they extend to 1.5 * the interquartile range beyond the lower/upper quartiles by default
The horizontal line is the median
Boxplots have their own method on DataFrames
End of explanation
"""
def jitter(x, n, noise=.05):
return x + np.random.normal(0, noise, size=n)
ax = dta.boxplot(column="temp", by="season", grid=False, figsize=(8, 10), fontsize=16,
whis=[5, 95])
ax.set_title(ax.get_title(), fontsize=20)
ax.xaxis.get_label().set_fontsize(18)
fig = ax.figure
# http://stackoverflow.com/a/12449783/535665
fig.texts[0].set_fontsize(20)
# whitespace between axes and fig boundary
fig.subplots_adjust(top=.85)
for i, season in enumerate(ax.get_xticklabels()):
y = dta.ix[dta.season == season.get_text()].temp
x = jitter(i + 1, len(y))
# there's a lot of data so turn the alpha way down (or sub-sample)
ax.plot(x, y, 'ro', alpha=.05)
"""
Explanation: We can add some more information by overlaying the original data on the boxplot
End of explanation
"""
baseball = pd.read_csv("../data/baseball.csv")
baseball.head()
ax = baseball.plot(kind="scatter", x="ab", y="h", grid=False, figsize=(8, 6), s=8**2,
alpha=.7)
ax.margins(0)
ax.set_xlim(0, 700)
ax.set_ylim(0, 200)
"""
Explanation: Scatterplots
Let's load the baseball dataset to look at scatterplots
End of explanation
"""
ax = baseball.plot(kind="scatter", x="ab", y="h", grid=False, figsize=(8, 6), s=baseball.hr*10,
alpha=.5)
ax.margins(0)
ax.set_xlim(0, 700)
ax.set_ylim(0, 200)
"""
Explanation: We can uncover more information by changing the size of the points
End of explanation
"""
ax = baseball.plot(kind="scatter", x="ab", y="h", grid=False, figsize=(8, 6), c="DarkGreen", s=50)
ax = baseball.plot(kind="scatter", x="ab", y="rbi", grid=False, figsize=(8, 6), c="Blue", s=50,
ax=ax)
ax.margins(0)
ax.set_xlim(0, 700)
ax.set_ylim(0, 200);
"""
Explanation: Or by adding color using the c keyword
End of explanation
"""
ax = baseball.plot(kind="scatter", x="ab", y="h", grid=False, figsize=(8, 6), c=baseball.hr*10,
s=40, cmap="hot")
ax.margins(0)
ax.set_xlim(0, 700)
ax.set_ylim(0, 200);
"""
Explanation: c can also be a color intensity
in this case we can specify a colormap through the cmap keyword
End of explanation
"""
ax = baseball.plot(kind="scatter", x="ab", y="h", grid=False, figsize=(8, 6), c=baseball.hr*10,
s=40, cmap="hot")
ax.margins(0)
ax.set_xlim(0, 700)
ax.set_ylim(0, 200)
fig = ax.figure
# colorbars are actually a separate subplot in your figure
colorbar = fig.axes[1]
colorbar.yaxis.set_tick_params(right=False);
"""
Explanation: Notice that there is a colorbar automatically
We can adjust it just like all other things matplotlib
It's actually implemented as a separate axes subplot in the figure
End of explanation
"""
ax = pd.scatter_matrix(baseball.loc[:,'r':'sb'], figsize=(14, 10), diagonal='hist')
ax = pd.scatter_matrix(baseball.loc[:,'r':'sb'], figsize=(14, 10), diagonal='kde')
"""
Explanation: Use pd.scatter_matrix to view a large number of variables simultaneously
End of explanation
"""
idx = pd.to_datetime(dta.year*10000 + dta.month*100 + dta.day, format='%Y%m%d')
idx
y = dta.set_index(idx).temp
y.head()
y.index
"""
Explanation: Plotting Time-Series
Let's convert the temperature data into a TimeSeries for convenience
End of explanation
"""
#ax = y.plot(figsize=(12, 8))
ax = pd.rolling_mean(y, window=60, min_periods=1, center=True).plot(figsize=(12, 8),
label="Rolling 2-month mean")
means = y.groupby(lambda x : x.year).mean()
means.index = pd.DatetimeIndex(pd.to_datetime(means.index * 10000 + 1231, format="%Y%m%d"))
ax = means.plot(ax=ax, label="Yearly Average")
legend = ax.legend()
"""
Explanation: Pandas plotting is DatetimeIndex aware
Outside of the browser, you can pan and zoom and the tick labels adjust dynamically
End of explanation
"""
ax = plt.subplot2grid((2, 2), (0, 0))
"""
Explanation: GridSpec
GridSpec provides a high-level abstraction for placing subplots on a grid
plt.subplot2grid is a helper function for creating grids of subplots
To create a 2x2 figure with a reference to the first axes we could do
python
ax = plt.subplot(2, 2, 1)
Equivalently with subplot2grid
End of explanation
"""
with plt.rc_context(rc={"xtick.labelsize": 0,
"ytick.labelsize": 0,
"axes.facecolor": "lightgray",
"figure.figsize": (8, 8)}):
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1, 2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2, 0))
ax5 = plt.subplot2grid((3,3), (2, 1))
ax1.figure.suptitle("subplot2grid", fontsize=20)
"""
Explanation: We can get easier, fine-grained control with subplot2grid when creating multiple subplots that span rows or columns, for example
End of explanation
"""
from matplotlib.gridspec import GridSpec
with plt.rc_context(rc={"xtick.labelsize": 0,
"ytick.labelsize": 0,
"axes.facecolor": "lightgray"}):
fig, ax = plt.subplots(figsize=(8, 8))
gs = GridSpec(3, 3)
ax1 = plt.subplot(gs[0, :])
# identical to ax1 = plt.subplot(gs.new_subplotspec((0,0), colspan=3))
ax2 = plt.subplot(gs[1,:-1])
ax3 = plt.subplot(gs[1:, -1])
ax4 = plt.subplot(gs[-1,0])
ax5 = plt.subplot(gs[-1,-2])
fig.suptitle("GridSpec", fontsize=20)
"""
Explanation: You can use GridSpec class directly to create the same plot
End of explanation
"""
import seaborn as sns
tips = sns.load_dataset("tips")
tips.head()
"""
Explanation: Seaborn
Seaborn is a Python visualization library based on matplotlib. It provides a high-level interface for drawing attractive statistical graphics.
It is built on top of matplotlib
Provides support for numpy and pandas
Coupled with statistical routines from scipy and statsmodels
Trellis plots
"At the heart of quantitative reasoning is a single question: Compared to what? Small multiple designs, multivariate and data bountiful, answer directly by visually enforcing comparisons of changes, of the differences among objects, of the scope of alternatives. For a wide range of problems in data presentation, small multiples are the best design solution."
-Edward Tufte
For medium-dimensional data,
Multiple instances of the same plot on different subsets of your dataset.
Quickly extract a large amount of information about complex data.
End of explanation
"""
with mpl.rc_context(rc={"legend.fontsize": "18", "axes.titlesize": "18"}):
g = sns.FacetGrid(tips, col="sex", hue="smoker", size=7)
g.map(plt.scatter, "total_bill", "tip", alpha=.7, s=80)
g.add_legend()
g._legend.get_title().set_fontsize(20)
g.axes[0, 0].title.set_fontsize(20)
g.axes[0, 0].xaxis.get_label().set_fontsize(20)
g.axes[0, 1].title.set_fontsize(20)
g.axes[0, 1].xaxis.get_label().set_fontsize(20)
"""
Explanation: FacetGrid
Used to visualize the distribution of a variable or the relationship between multiple variables within subsets of your data
Can be drawn with up to three dimensions: row, col, and hue.
These should be discrete variables
Say we wanted to examine differences between lunch and dinner in the tips dataset
End of explanation
"""
ax = dta.boxplot(column="temp", by="season", grid=False, figsize=(8, 10), fontsize=16,
whis=[5, 95])
X = dta[["temp", "season"]].dropna()
ax = sns.violinplot(X.temp, groupby=X.season)
"""
Explanation: Violin plot
The violin plot is a combination of a boxplot and a kernel density estimator
End of explanation
"""
ax = sns.violinplot(X.temp, groupby=X.season, inner='points', alpha=.5,
order=['Winter', 'Spring', 'Summer', 'Fall'])
"""
Explanation: We can plot the points inside the violins and re-order the seasons
End of explanation
"""
temp95 = dta.query("year == 1995")[["temp", "month", "day"]]
temp14 = dta.query("year == 2014")[["temp", "month", "day"]]
temps = temp95.merge(temp14, on=["month", "day"], how="inner", suffixes=("_95", "_14"))
g = sns.jointplot(temps.temp_95, temps.temp_14, kind="kde", size=7, space=0)
"""
Explanation: Distribution plots
Seaborn allows you to look at bivariate distributions. Here, we can compare the distribution of the temperatures in 1995 and 2014.
End of explanation
"""
g = sns.jointplot(temps.temp_95, temps.temp_14, kind="hex", color="#4CB391",
joint_kws={"bins": 200})
"""
Explanation: We can also look at a hexbin plot of the same data with the marginal distributions as histograms.
End of explanation
"""
fig, ax = plt.subplots(figsize=(6, 6))
np.random.seed(0)
x, y = np.random.normal(size=(2, 200))
color, size = np.random.random((2, 200))
ax.scatter(x, y, c=color, s=500 * size, alpha=0.5, cmap="rainbow")
ax.grid(color='lightgray', alpha=0.7)
"""
Explanation: mpld3
The mpld3 project brings together Matplotlib, and D3js, the popular Javascript library for creating interactive data visualizations for the web. The result is a simple API for exporting your matplotlib graphics to HTML code which can be used within the browser, within standard web pages, blogs, or tools such as the IPython notebook.
Let's look at a regular scatter plot
End of explanation
"""
import mpld3
mpld3.display(fig)
"""
Explanation: Unfortunately, this is just a static image. Let's use mpld3 to change that. Using the display command, you get a fully interactive visualization of the figure.
End of explanation
"""
from mpld3 import plugins
fig, ax = plt.subplots(6, 6, figsize=(6, 6))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
ax = ax[::-1]
X = baseball.loc[:, 'r':'rbi']
for i in range(6):
for j in range(6):
ax[i, j].xaxis.set_major_formatter(plt.NullFormatter())
ax[i, j].yaxis.set_major_formatter(plt.NullFormatter())
points = ax[i, j].scatter(X.values[:, j], X.values[:, i])
if i == 0:
ax[i, j].set_xlabel(X.columns[j])
ax[i, 0].set_ylabel(X.columns[i])
plugins.connect(fig, plugins.LinkedBrush(points))
mpld3.display(fig)
"""
Explanation: Notice the toolbar on hover. You can use that to interact with the figure.
You can use mpld3 for every plot that you render in the notebook by executing
python
mpld3.enable_notebook()
mpld3 plugins
Much like event handling via callback functions in regular matplotlib (not covered in this notebook), you can define plugins for mpld3 to specify additional interactivity.
A number of plugins are built-in, and it is also possible to define new, custom plugins for nearly limitless interactive behaviors. For example, here is the built-in Linked Brushing plugin that allows exploration of multi-dimensional datasets:
End of explanation
"""
from IPython.display import Image, HTML
# Image("./tufte.svg")
HTML("./tufte.svg")
"""
Explanation: Putting it all together
Let's recreate this graphic inspired by Tufte's
End of explanation
"""
import os
to_colors = lambda x : x/255.
blue3 = list(map(to_colors, (24, 116, 205))) # 1874CD
wheat2 = list(map(to_colors, (238, 216, 174))) # EED8AE
wheat3 = list(map(to_colors, (205, 186, 150))) # CDBA96
wheat4 = list(map(to_colors, (139, 126, 102))) # 8B7E66
firebrick3 = list(map(to_colors, (205, 38, 38))) # CD2626
gray30 = list(map(to_colors, (77, 77, 77))) # 4D4D4D
"""
Explanation: This is a plot of NYC's weather in 2014 versus historical averages
Daily historical highs and lows
Historical confidence intervals around averages
The daily temperatures for 2013
Markers for new highs and lows
Annotations for points
Text for the graphic
Custom tick labels
Load the data from yesterday
End of explanation
"""
idx = range(366)
"""
Explanation: You probably don't want to work with the month, day tuples in their present form for plotting
Instead, you can use the below for the x axis
End of explanation
"""
np.where([True, False, False, True, False])[0]
"""
Explanation: First, make the figure and plot the high and low bars (Hints: see the ax.vlines)
The color is wheat3
Second, plot the confidence intervals around the historical means
The color is wheat4
Plot the highs and lows of the present year in present_highs and present_lows
You will need the x axes of these two objects to line up with your current x axis (Hint: you may find np.where to be helpful)
End of explanation
"""
yticks = range(-10, 101, 10)
ylabels = [str(i) + u"\u00b0" for i in yticks]
ylabels
"""
Explanation: Annotate one of the 2014 historical lows and one of the 2014 historical highs with the appropriate text (Hint: see ax.annotate)
You may want to look at some of the examples below for annotate and arrows
Now, add text to the figure. (Hint: see ax.text)
Finally, let's add the correct tick labels
You can use unicode to add the $^\circ$
End of explanation
"""
with plt.xkcd():
# Based on "Stove Ownership" from XKCD by Randall Monroe
# http://xkcd.com/418/
fig = plt.figure()
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.xticks([])
plt.yticks([])
ax.set_ylim([-30, 10])
data = np.ones(100)
data[70:] -= np.arange(1, 31)
plt.annotate(
'THE DAY I REALIZED\nI COULD COOK BACON\nWHENEVER I WANTED',
xy=(70, 1), arrowprops=dict(arrowstyle='->'), xytext=(15, -10), zorder=-1)
plt.plot(data)
plt.xlabel('time')
plt.ylabel('my overall health')
fig.text(0.5, 0.05,
'"Stove Ownership" from xkcd by Randall Monroe', ha='center')
with plt.xkcd():
# Based on "The data So Far" from XKCD by Randall Monroe
# http://xkcd.com/373/
fig = plt.figure()
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.bar([-0.125, 1.0-0.125], [0, 100], 0.25)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks([0, 1])
ax.set_xlim([-0.5, 1.5])
ax.set_ylim([0, 110])
ax.set_xticklabels(['CONFIRMED BY\nEXPERIMENT', 'REFUTED BY\nEXPERIMENT'])
ax.set_yticks([])
fig.suptitle("CLAIMS OF SUPERNATURAL POWERS")
fig.text(0.5, 0.01,
'"The Data So Far" from xkcd by Randall Monroe',
ha='center', )
"""
Explanation: Other frequently used plotting tricks
XKCD and Annotation
End of explanation
"""
from matplotlib.ticker import MaxNLocator
x = np.arange(20)
y = np.random.randn(20)
fig, ax = plt.subplots()
ax.plot(x, y)
ax.xaxis.set_major_locator(MaxNLocator(nbins=8))
"""
Explanation: Tick Tricks
End of explanation
"""
x = np.arange(20)
y1 = np.random.randn(20)
y2 = np.random.randn(20)
fig, axes = plt.subplots(2, 1, sharex=True)
axes[0].plot(x, y1)
axes[1].plot(x, y2)
fig.tight_layout()
"""
Explanation: ColorMaps
See colormap reference
Sharing Axes
End of explanation
"""
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
s2 = np.sin(2*np.pi*t)
fig, ax1 = plt.subplots()
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('exp', color='b', fontsize=18)
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.plot(t, s2, 'r.')
ax2.set_ylabel('sin', color='r', fontsize=18)
for tl in ax2.get_yticklabels():
tl.set_color('r')
"""
Explanation: Twinning Axes
End of explanation
"""
fig, ax = plt.subplots()
ax.imshow(np.random.uniform(0, 1, size=(50, 50)), cmap="RdYlGn")
"""
Explanation: Image Plots
End of explanation
"""
fig, ax = plt.subplots()
ax.set_ylabel("$\\beta^2$", fontsize=20, rotation=0, labelpad=20)
with mpl.rc_context(rc={"text.usetex": True}):
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_ylabel("$\\beta^2$", fontsize=20, rotation=0, labelpad=20)
"""
Explanation: $LaTeX$
By default, matplotlib uses its own $TeX$ engine for text and math layout
You have the option to call out to an external $TeX$ installation by setting the text.usetex option
End of explanation
"""
from matplotlib.pylab import bivariate_normal
np.random.seed(12)
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
with mpl.rc_context(rc={'xtick.direction': 'out',
'ytick.direction': 'out'}):
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are draw
# over the line segments of the contour, removing the lines beneath
# the label
fig, ax = plt.subplots(figsize=(8, 8))
contours = ax.contour(X, Y, Z)
ax.clabel(contours, inline=1, fontsize=10)
"""
Explanation: Contour Plots
End of explanation
"""
fig, ax = plt.subplots()
ax.arrow(0, 0, 0.5, 0.5, head_width=0.05, head_length=0.1, fc='k', ec='k')
ax.arrow(0.25, 0, 0.5, 0.5, head_width=0, head_length=0, fc='k', ec='k')
"""
Explanation: Arrows
End of explanation
"""
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(6, 10))
axes[0].fill_between(x, 0, y1)
axes[0].set_ylabel('between y1 and 0')
axes[1].fill_between(x, y1, 1)
axes[1].set_ylabel('between y1 and 1')
"""
Explanation: Filling in plots
End of explanation
"""
|
KMFleischer/PyEarthScience
|
Tutorial/04a_PyNGL_xy.ipynb
|
mit
|
import Ngl
wks = Ngl.open_wks('png', 'plot_xy')
"""
Explanation: 4.a Plot type - xy
Our first plot example is a simple xy-plot and the graphics output format is PNG.
End of explanation
"""
import numpy as np
x = np.arange(0,5)
y = np.arange(0,10,2)
plot = Ngl.xy(wks, x, y)
"""
Explanation: To use Numpy arrays we need to import the module.
Define x- and y-values:
End of explanation
"""
from IPython.display import Image
Image(filename='plot_xy.png', retina=True)
"""
Explanation: Hm, we created the plot but where is it? Unlike matplotlib, PyNGL can't display plots inline, but IPython provides a solution for us.
retina=True --> half of the size of the plot
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.tiXAxisString = 'x-axis title string'
res.tiYAxisString = 'y-axis title string'
plot = Ngl.xy(wks, x, y, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: That's really sparse. Next, we want to add a title above the plot and add the axis titles, too.
To do that we use the plot resources and add the resources to the plot function.
NOTICE:
The first plot call has created a file called plot_xy.png. If we call plot again it will create additional files with the names plot_xy.000001.png, plot_xy.000002.png, and so on. The first one is the plot above, the second will be the one shown below. Making further changes would keep increasing the number of plots on disk, and it would be hard to display the correct plot in the notebook. That's why we delete the workstation and create just one single plot.
If you use a script and run it at a terminal the workstation will be closed when the script exits, and you can rerun it without producing multiple files.
<br>
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.tiXAxisString = 'x-axis title string'
res.tiYAxisString = 'y-axis title string'
res.tmXMajorGrid = True
res.tmYMajorGrid = True
plot = Ngl.xy(wks, x, y, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: Display the grid lines of the coordinate system, too.
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.tiXAxisString = 'x-axis title string'
res.tiYAxisString = 'y-axis title string'
res.tmXMajorGrid = True
res.tmYMajorGrid = True
res.xyLineColor = 'red'
res.xyDashPattern = 3 # -- - -- - --
res.xyLineThicknessF = 5
plot = Ngl.xy(wks, x, y, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: Change the line settings
line color from black to red
line pattern from solid pattern to dashed pattern
line width thicker
End of explanation
"""
y1 = np.array([0,3,6,1,4])
y2 = np.array([2,5,3,2,7])
data = np.array([y1,y2])
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
plot = Ngl.xy(wks, x, data, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: Next, display two datasets in one xy-plot by stacking them into a single 2D array.
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.xyLineColors = ['red','blue']
res.xyDashPatterns = [3,16]
res.xyLineThicknesses = [5,3]
plot = Ngl.xy(wks, x, data, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: Uh, that's not what we want! We want to have two lines with different colors, lets say red and blue. And while we're at it, they should have two different dash pattern types.
End of explanation
"""
y1 = np.array([0,3,6,1,4])
y2 = np.array([2,5,3,2,7])
y3 = np.array([1,1,2,3,5])
data = np.array([y1,y2,y3])
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.xyLineColors = ['red','blue','green']
res.xyDashPatterns = [3,16,0]
res.xyLineThicknesses = [5,3,5]
plot = Ngl.xy(wks, x, data, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: NOTICE:
The used resources for more than one color, line thickness and line dash pattern are the plural of the resources from the single line.
Set the same color, dash pattern and thickness for one or multiple lines:
res.xyLineColor
res.xyDashPattern
res.xyLineThicknessF
Set different colors, dash pattern and thickness for each line:
res.xyLineColors
res.xyDashPatterns
res.xyLineThicknesses
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.xyLineColors = ['red','blue','green']
res.xyDashPatterns = [3,16,0]
res.xyLineThicknesses = [5,3,5]
res.pmLegendDisplayMode = "Always" #-- turn on the drawing
plot = Ngl.xy(wks, x, data, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: We can distinguish them now but don't know which line is y1, y2 or y3. It is always good to have a legend, and that's what we want to do next.
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy.png')
res = Ngl.Resources()
res.tiMainString = 'This is the title string'
res.xyLineColors = ['red','blue','green']
res.xyDashPatterns = [3,16,0]
res.xyLineThicknesses = [5,3,5]
res.xyExplicitLegendLabels = ["y1","y2","y3"]
res.pmLegendDisplayMode = "Always" #-- turn on the legend drawing
res.pmLegendOrthogonalPosF = -1.0 #-- move the legend upward
res.pmLegendParallelPosF = 0.17 #-- move the legend rightward
res.pmLegendWidthF = 0.15 #-- increase width
res.pmLegendHeightF = 0.10 #-- increase height
res.lgPerimOn = False #-- turn off the perimeter
plot = Ngl.xy(wks, x, data, res)
Image(filename='plot_xy.png', retina=True)
"""
Explanation: That is the default. Doesn't look very nice, does it? So, we should fix it up a bit.
use the correct dataset names
make the legend smaller
move it to the upper left inside the plot
End of explanation
"""
import xarray as xr
ds = xr.open_dataset('./data/tsurf_fldmean.nc')
tsurf = ds.tsurf
time = np.arange(0,len(ds.time),1)
"""
Explanation: The next example shows how to read a dataset from file.
Dataset: ./data/tsurf_fldmean.nc
Variable: tsurf
End of explanation
"""
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy_tsurf.png')
res = Ngl.Resources()
res.tiMainString = 'Variable tsurf'
res.tiXAxisString = 'time'
res.tiYAxisString = tsurf.long_name
plot = Ngl.xy(wks, time, tsurf[:,0,0].values, res)
Image(filename='plot_xy_tsurf.png', retina=True)
"""
Explanation: The variable time is x and the variable tsurf is y.
Note that we have to use tsurf.values because Ngl.xy needs a NumPy array.
End of explanation
"""
import datetime
ntime = len(ds.time)
years = ds.time.dt.year.values
months = ds.time.dt.month.values
days = ds.time.dt.day.values
date_labels = [datetime.date(years[i],months[i],days[i]) for i in range(0,ntime)]
date_labels = list(np.array(date_labels,dtype='str'))
Ngl.delete_wks(wks)
wks = Ngl.open_wks('png', 'plot_xy_tsurf.png')
res = Ngl.Resources()
res.tiMainString = 'Variable tsurf'
res.tiXAxisString = 'time'
res.tiYAxisString = tsurf.long_name
res.tmXBMode = 'Explicit' #-- use explicit values
res.tmXBValues = time[::4] #-- use the new x-values array
res.tmXBLabels = date_labels[::4] #-- use the new x-values array as labels
res.tmXBLabelFontHeightF = 0.008
res.tmXBLabelAngleF = 45
res.tmXBMinorOn = False #-- turn off minor tickmark
plot = Ngl.xy(wks, time, tsurf[:,0,0].values, res)
Image(filename='plot_xy_tsurf.png', retina=True)
"""
Explanation: Hm, I would like to have the x-axis labels as dates and not as indices.
Convert the time values to date strings using the Python module datetime.
End of explanation
"""
|
ealogar/curso-python
|
sysadmin/1_Gathering_system_data.ipynb
|
apache-2.0
|
import psutil
import glob
import sys
import subprocess
#
# Our code is p3-ready
#
from __future__ import print_function, unicode_literals
def grep(needle, fpath):
"""A simple grep implementation
goal: open() is iterable and doesn't
need splitlines()
goal: comprehension can filter lists
"""
return [x for x in open(fpath) if needle in x]
# Do we have localhost?
print(grep("localhost", "/etc/hosts"))
#The psutil module is very nice
import psutil
#Works on Windows, Linux and MacOS
psutil.cpu_percent()
#And its output is very easy to manage
ret = psutil.disk_io_counters()
print(ret)
# Exercise: Which other information
# does psutil provide?
# Use this cell and the tab-completion jupyter functionalities.
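# A few examples (a sketch, not an exhaustive answer to the exercise above):
print(psutil.cpu_count())          # number of logical CPUs
print(psutil.virtual_memory())     # RAM usage statistics
print(psutil.net_io_counters())    # network I/O counters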
# Exercise
def multiplatform_vmstat(count):
# Write a vmstat-like function printing every second:
# - cpu usage%
# - bytes read and written in the given interval
# Hint: use psutil and time.sleep(1)
# Hint: use this cell or try on ipython and *then* write the function
# using %edit vmstat.py
for i in range(count):
raise NotImplementedError
print(cpu_usage, bytes_rw)
multiplatform_vmstat(5)
!python -c "from solutions import multiplatform_vmstat;multiplatform_vmstat(3)"
#
# subprocess
#
# The check_output function returns the command stdout
from subprocess import check_output
# It takes a *list* as an argument!
out = check_output("ping -c5 www.google.com".split())
# and returns a string
print(out)
print(type(out))
"""
Explanation: Gathering system data
Goals:
- Gathering System Data with multiplatform and platform-dependent tools
- Get infos from files, /proc, /sys
- Capture command output
- Use psutil to get IO, CPU and memory data
- Parse files with a strategy
Non-goals for this lesson:
- use with, yield or pipes
Modules
End of explanation
"""
def sh(cmd, shell=False, timeout=0):
""""Returns an iterable output of a command string
checking...
"""
from sys import version_info as python_version
if python_version < (3, 3): # ..before using..
if timeout:
raise ValueError("Timeout not supported until Python 3.3")
output = check_output(cmd.split(), shell=shell)
else:
output = check_output(cmd.split(), shell=shell, timeout=timeout)
return output.splitlines()
# Exercise:
# implement a multiplatform pgrep-like function.
def pgrep(program):
"""
A multiplatform pgrep-like function.
Prints a list of processes executing 'program'
@param program - eg firefox, explorer.exe
Hint: use subprocess, os and list-comprehension
eg. items = [x for x in a_list if 'firefox' in x]
"""
raise NotImplementedError
pgrep('firefox')
from solutions import pgrep as sol_pgrep
sol_pgrep("firefox")
"""
Explanation: If you want to stream command output, use subprocess.Popen and check carefully subprocess documentation!
End of explanation
"""
# Parsing /proc - 1
def linux_threads(pid):
"""Retrieving data from /proc
"""
from glob import glob
# glob emulates shell expansion of * and ?
# Change to /proc the base path if you run on linux machine
path = "proc/{}/task/*/status".format(pid)
# pick a set of fields to gather
t_info = ('Pid', 'Tgid', 'voluntary') # this is a tuple!
for t in glob(path):
# ... and use comprehension to get
# interesting data.
t_info = [x
for x in open(t)
if x.startswith(t_info)] # startswith accepts tuples!
print(t_info)
# If you're on linux try linux_threads
pid_of_init = 1 # or systemd ?
linux_threads(pid_of_init)
# On linux /proc/diskstats is the source of I/O infos
disk_l = grep("vda1", "proc/diskstats")
print(''.join(disk_l))
# To gather that data we put the header in a multiline string
from solutions import diskstats_headers as headers
print(*headers, sep='\n')
#Take the 1st entry (sda), split the data...
disk_info = disk_l[0].split()
# ... and tie them with the header
ret = zip(headers, disk_info)
# On py3 we need to iterate over the generators
print(list(ret))
# Try to mangle ret
print('\n'.join(str(x) for x in ret))
# Exercise: transform ret in a dict.
# We can create a reusable commodity class with
from collections import namedtuple
# using the imported `headers` as attributes
# like the one provided by psutil
DiskStats = namedtuple('DiskStat', headers)
# ... and disk_info as values
dstat = DiskStats(*disk_info)
print(dstat.device, dstat.writes_ms)
# Exercise
# Write the following function
def linux_diskstats(partition):
"""Print every second I/O information from /proc/diskstats
@param: partition - eg sda1 or vdx1
Hint: use the above `grep` function
Hint: use zip, time.sleep, print() and *magic
"""
diskstats_headers = ('reads reads_merged reads_sectors reads_ms'
' writes writes_merged writes_sectors writes_ms'
' io_in_progress io_ms_weight').split()
while True:
raise NotImplementedError
print(values, sep="\t")
!python -c "from solutions import linux_diskstats;linux_diskstats('vda1')"
# Using check_output with split() doesn't always work
from os import makedirs
makedirs('/tmp/course/b l a n k s') # , exist_ok=True) this on py3
check_output('ls "/tmp/course/b l a n k s"'.split())
# You can use
from shlex import split
# and
cmd = split('dir -a "/tmp/course/b l a n k s"')
check_output(cmd)
"""
Explanation: Parsing /proc
Linux /proc filesystem is a cool place to get data
In the next example we'll see how to get:
- thread informations;
- disk statistics;
End of explanation
"""
|
NYUDataBootcamp/Projects
|
UG_S17/Sohil-Patel-Final-Project.ipynb
|
mit
|
import sys # system module
import pandas as pd # data package
import matplotlib as mpl # graphics package
import matplotlib.pyplot as plt # pyplot module
import datetime as dt # date and time module
import numpy as np
import pandas as pd
import seaborn as sns
import sys
from scipy.stats import linregress
%matplotlib inline
"""
Explanation: Confluences between Artist Sale Prices: British Post-War Contemporary Micro-Art Markets from 1990-1995
Sohil S. Patel,
May 12th, 2017
ABSTRACT:
Some art economists believe that the micro-markets of individual artists are independent of one another. This project seeks to either confirm or deny this view point on the art market. By studying the confluences between 3 British Post-War Contemporary Artists and their respective sale prices, we can make an informed hypothesis on whether individual artists' markets are in fact independent. The artists Francis Bacon, Lucian Freud, and Frank Auerbach were chosen for this analysis because they are all from within the same genre of british post-war contemporary art, belong to the same stylistic movements, and are considered by the art historical community to be highly related. While each of their individual artworks’ values vary, what is more relevant for this analysis is the whether their prices move together or not. There are four prongs to my analysis: 1) a sorting of the raw Post-Auction data gathered from Artnet (an online art price database), 2) creation of key metrics using the sale price data, 3) a graphical representation to determine which of these artists had the “strongest” market, and 4) a comparison of the 3 artists' sale price by year. My hypothesis is that the artist with the “strongest” market will be a sort of genre-leader and the other artists' changes in sale price will follow in tow. The ultimate goal is to discover empirical evidence of a correlation between individual artist's markets at the micro-level. Potential benefits of an affirmative conclusion include: lower risk for using art as an asset class, more strategic sale timing of artworks, and more accurate appraisal and valuation techniques.
End of explanation
"""
#Data for Francis Bacon Art Prices
path_fb = '/Users/Sohil/Desktop/Data_Bootcamp_Final/FrancisBacon.csv'
fb = pd.read_csv(path_fb,
thousands=',')
fb = fb.rename(columns={'a' : 'Year', 'b' : 'Low_Estimate', 'c' : 'High_Estimate', 'd' : 'Price'})
#Calculating key metric of Sale Price Deviation %
fb['Percent_Deviation_from_Low_Estimate'] = ((fb['Price'] - fb['Low_Estimate']) / (fb['Low_Estimate'])) * 100
fb['Percent_Deviation_from_High_Estimate'] = ((fb['Price'] - fb['High_Estimate']) / (fb['High_Estimate']))* 100
#Data for Frank Auerbach Sale Prices
path_fa = '/Users/Sohil/Desktop/Data_Bootcamp_Final/FrankAuerbach.csv'
fa = pd.read_csv(path_fa,
thousands=',')
fa = fa.rename(columns={'a' : 'Year', 'b' : 'Low_Estimate', 'c' : 'High_Estimate', 'd' : 'Price'})
#Calculating key metric of Sale Price Deviation %
fa['Percent_Deviation_from_Low_Estimate'] = ((fa['Price'] - fa['Low_Estimate']) / (fa['Low_Estimate'])) * 100
fa['Percent_Deviation_from_High_Estimate'] = ((fa['Price'] - fa['High_Estimate']) / fa['High_Estimate']) * 100
#Data for Lucian Freud Sale Prices
path_lf = '/Users/Sohil/Desktop/Data_Bootcamp_Final/LucianFreud.csv'
lf = pd.read_csv(path_lf,
thousands=',')
lf = lf.rename(columns={'a' : 'Year', 'b' : 'Low_Estimate', 'c' : 'High_Estimate', 'd' : 'Price'})
#Calculating key metric of Sale Price Deviation %
lf['Percent_Deviation_from_Low_Estimate'] = ((lf['Price'] - lf['Low_Estimate']) / (lf['Low_Estimate'])) * 100
lf['Percent_Deviation_from_High_Estimate'] = ((lf['Price'] - lf['High_Estimate']) / (lf['High_Estimate'])) * 100
#Metric test of market strength of each artist
fb_low_dev_mean = fb['Percent_Deviation_from_Low_Estimate'].mean()
fb_high_dev_mean = fb['Percent_Deviation_from_High_Estimate'].mean()
#Mean of deviation percentage for Francis Bacon
fa_low_dev_mean = fa['Percent_Deviation_from_Low_Estimate'].mean()
fa_high_dev_mean = fa['Percent_Deviation_from_High_Estimate'].mean()
#Mean of deviation percentages for Frank Auerbach
lf_low_dev_mean = lf['Percent_Deviation_from_Low_Estimate'].mean()
lf_high_dev_mean = lf['Percent_Deviation_from_High_Estimate'].mean()
#Mean of deviation percentages for Lucian Freud
"""
Explanation: 1) Data Source and Data Usage
The data used here is sourced from Artnet, the leading art price database. The sale prices are historic auction data from around the globe. I have picked a five-year sampling period, 1990-1995, for this analysis. The data is split into 4 categories: Year of Sale, Low Estimate, High Estimate, and Price Realized. Whenever an artwork is for sale at auction, appraisers give an estimated range of potential sale prices. Given that art has no fundamental or intrinsic value, valuation of art can be fickle, and hence appraisers have adopted an interval method.
End of explanation
"""
#Bar graph depiciting mean deviation percentages per artist
D = {'Franics Bacon':fb_low_dev_mean, 'Frank Auerbach': fa_low_dev_mean, 'Lucian Freud':lf_low_dev_mean}
plt.barh(range(len(D)),
D.values(),
align='center',
color=['#A9D0F5', '#58ACFA', '#045FB4'],)
plt.yticks(range(len(D)), D.keys())
plt.axvline(x=0.3, color='#B43104')
plt.ylabel('Artists', fontsize=10)
plt.xlabel ('Mean Percent Deviation between Low Estimate and Price', fontsize= 10)
plt.title("Comparison of Mean % Deviation between Price Realized and Low Estimate", fontsize=13)
plt.show()
#Bar graph depiciting mean deviation percentages per artist
D = {'Franics Bacon':fb_high_dev_mean, 'Frank Auerbach': fa_high_dev_mean, 'Lucian Freud':lf_high_dev_mean}
plt.barh(range(len(D)),
D.values(),
align='center',
color=['#A9D0F5', '#58ACFA', '#045FB4'],)
plt.axvline(x=0, color='#B43104')
plt.yticks(range(len(D)),
D.keys())
plt.ylabel('Artists', fontsize=10)
plt.xlabel ('Mean Percent Deviation between Price and High Estimate', fontsize= 10)
plt.title("Comparison of Mean % Deviation between Price Realized and High Estimate", fontsize=13)
"""
Explanation: From this data we calculated two key metrics that will be useful in determining which of these three artists' markets was most robust in this period. The percent deviation between the Price Realized and the corresponding Low and High estimates is an efficient way to test for market strength. If an artist consistently performs above the appraiser's high estimate (i.e. a positive deviation value), that indicates the artist is outperforming market expectations. On the other hand, a weak artist's market is consistently underperforming if it remains close to its low estimate value. The real utility of this metric is as a means of measuring the 'strength' of market performance. Whichever of these artists has the highest positive deviation values will likely be the most consistent performer and, if my hypothesis holds true, would be the "market leader" of this group of artists. If we can see any correlation between upward-trending deviations and upward-moving sale prices, we can more confidently believe that individual artists' markets do affect one another.
Now, I have used these metrics to create two bar charts. The first represents the mean % deviation between the price realized and the low estimate, while the second represents the mean % deviation between the price and the high estimate.
End of explanation
"""
from scipy import stats   # needed below: the regression cells call stats.linregress
import pylab               # needed below: the regression cells plot with pylab
x= fb['Year']
y= fb['Price']
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y)
# Used a linear regession model to estimate trend of markets.
predict_y = intercept + slope * x
pred_error = y - predict_y
degrees_of_freedom = len(x) - 2
residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
pylab.plot(x, y, 'o')
pylab.plot(x, predict_y, 'k-')
pylab.ylim(0,1000000)
pylab.xlim(1990,1995)
pylab.title('Francis Bacon Sale Price, 1990-95, with Linear Regression', fontsize=13)
pylab.ylabel('USD($)', fontsize=10)
pylab.xlabel('Years (Scientific Notation)(1990-95)', fontsize=10)
pylab.show()
x= fa['Year']
y= fa['Price']
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y)
# Used a linear regession model to estimate trend of markets.
predict_y = intercept + slope * x
pred_error = y - predict_y
degrees_of_freedom = len(x) - 2
residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
pylab.plot(x, y, 'o')
pylab.plot(x, predict_y, 'k-')
pylab.ylim(0,100000)
pylab.xlim(1990,1995)
pylab.title('Frank Auerbach Sale Price, 1990-95, with Linear Regression', fontsize=13)
pylab.ylabel('USD($)', fontsize=10)
pylab.xlabel('Years (Scientific Notation)(1990-95)', fontsize=10)
pylab.show()
x= lf['Year']
y= lf['Price']
slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y)
# Used a linear regession model to estimate trend of markets.
predict_y = intercept + slope * x
pred_error = y - predict_y
degrees_of_freedom = len(x) - 2
residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom)
pylab.plot(x, y, 'o')
pylab.plot(x, predict_y, 'k-')
pylab.ylim(0,100000)
pylab.xlim(1990,1995)
pylab.title('Lucian Freud Sale Price, 1990-95, with Linear Regression', fontsize=13)
pylab.ylabel('USD($)', fontsize=10)
pylab.xlabel('Years (Scientific Notation)(1990-95)', fontsize=10)
pylab.show()
"""
Explanation: It is clear from this set of data that Lucian Freud is consistently outperforming his market, Frank Auerbach takes a close second, and Francis Bacon lags behind. Now we will compare these findings to the sale prices to see whether there are any connections to be drawn.
End of explanation
"""
|
SamLau95/nbinteract
|
docs/notebooks/tutorial/tutorial_monty_hall.ipynb
|
bsd-3-clause
|
from ipywidgets import interact
import numpy as np
import random
PRIZES = ['Car', 'Goat 1', 'Goat 2']
def monty_hall(example_num=0):
'''
Simulates one round of the Monty Hall Problem. Outputs a tuple of
(result if stay, result if switch, result behind opened door) where
each results is one of PRIZES.
'''
pick = random.choice(PRIZES)
opened = random.choice(
[p for p in PRIZES if p != pick and p != 'Car']
)
remainder = next(p for p in PRIZES if p != pick and p != opened)
return (pick, remainder, opened)
"""
Explanation: An Interactive Monty Hall Simulation
nbinteract was designed to make interactive explanations easy to create. In this tutorial, we will show the process of writing a simulation from scratch and visualizing the results interactively.
In this section, you will create an interactive simulation of the Monty Hall Problem. You may continue writing code in the notebook from the previous section or create a new one for this section.
The Monty Hall Problem
The Monty Hall Problem (Wikipedia) is a famous probability problem that has stumped many, mathematicians included. The problem goes something like this:
Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, "Do you want to switch your choice to door No. 2?" Is it to your advantage to switch your choice?
Perhaps unintuitively, you will win the prize about twice as often if you switch doors. We can show this through simulation.
Simulating a Game
One way to write an interactive explanation is to write functions and create interactions for each one as applicable. Composing the functions allows you to create more complicated processes. nbinteract also provides tools for interactive visualizations as we will soon see.
Let's start with defining a function to simulate one round of the Monty Hall Problem.
End of explanation
"""
interact(monty_hall, example_num=(0, 100));
"""
Explanation: Note that the example_num argument is passed in but not used in the monty_hall function. Although it's unneeded for the function, it is easier to use interact to call functions when they have arguments to manipulate:
End of explanation
"""
def winner(example_num=0):
'''
Plays a game of Monty Hall. If staying with the original door wins
a car, return 'stay'. Otherwise, the remaining door contains the car
so 'switch' would have won.
'''
picked, _, _ = monty_hall()
return 'stay' if picked == 'Car' else 'switch'
interact(winner, example_num=(0, 100));
"""
Explanation: By interacting with the function above, we are able to informally verify that the function never allows the host to open a door with a car behind it. Even though the function is random we are able to use interaction to examine its long-term behavior!
We'll continue by defining a function to simulate a game of Monty Hall and output the winning strategy for that game:
End of explanation
"""
import nbinteract as nbi
nbi.bar(['a', 'b'], [4, 6])
"""
Explanation: Again, a bit of interaction lets us quickly examine the behavior of winner. We can see that switch appears more often than stay.
Brief Introduction to Plotting with nbinteract
Let's create an interactive bar chart of the number of times each strategy wins. We'll use nbinteract's plotting functionality.
nbi.bar creates a bar chart:
End of explanation
"""
# This function generates the x-values
def categories(n):
return list('abcdefg')[:n]
# This function generates the y-values (heights of bars)
# The y response function always takes in the x-values as its
# first argument
def offset_y(xs, offset):
num_categories = len(xs)
return np.arange(num_categories) + offset
# Each argument of the response functions is passed in as a keyword
# argument to `nbi.bar` in the same format as `interact`
nbi.bar(categories, offset_y, n=(1, 7), offset=(0, 10))
"""
Explanation: To make an interactive chart, pass a response function in place of one or both of bar's arguments.
End of explanation
"""
categories = ['stay', 'switch']
winners = [winner() for _ in range(1000)]
# Note that the first argument to the y response function
# will be the x-values which we don't need
def won(_, num_games):
'''
Outputs a 2-tuple of the number of times each strategy won
after num_games games.
'''
return (winners[:num_games].count('stay'),
winners[:num_games].count('switch'))
nbi.bar(categories, won, num_games=(1, 1000))
"""
Explanation: Visualizing the Winners
Now, let's turn back to our original goal: plotting the winners as games are played.
We can call winner many times and use nbi.bar to show the bar chart as it's built over the trials.
Note that we compute the results before defining our function won. This has two benefits over running the simulation directly in won:
It gives us consistency in our interaction. If we ran the random simulation inside won, moving the slider from 500 to a different number and back to 500 would give us a slightly different bar chart.
It makes the interaction smoother since less work is being done each time the slider is moved.
End of explanation
"""
options = {
'title': 'Number of times each strategy wins',
'xlabel': 'Strategy',
'ylabel': 'Number of wins',
'ylim': (0, 700),
}
nbi.bar(categories, won, options=options, num_games=(1, 1000))
"""
Explanation: Note that by default the plot will adjust its y-axis to match the limits of the data. We can manually set the y-axis limits to better visualize this plot being built up. We will also add labels to our plot:
End of explanation
"""
from ipywidgets import Play
nbi.bar(categories, won, options=options,
num_games=Play(min=0, max=1000, step=10, value=0, interval=17))
"""
Explanation: We can get even fancy and use the Play widget from ipywidgets to animate the plot.
End of explanation
"""
def prop_wins(sample_size):
'''Returns proportion of times switching wins after sample_size games.'''
return sum(winner() == 'switch' for _ in range(sample_size)) / sample_size
interact(prop_wins, sample_size=(10, 100));
"""
Explanation: Now we have an interactive, animated bar plot showing the distribution of wins over time for both Monty Hall strategies. This is a convincing argument that switching is better than staying. In fact, the bar plot above suggests that switching is about twice as likely to win as staying!
Simulating Sets of Games
Is switching actually twice as likely to win? We can again use simulation to answer this question by simulating sets of 50 games at a time, recording the proportion of times switching wins.
End of explanation
"""
def generate_proportions(sample_size, repetitions):
'''
Returns an array of length repetitions. Each element in the array is the
proportion of times switching won in sample_size games.
'''
return np.array([prop_wins(sample_size) for _ in range(repetitions)])
interact(generate_proportions, sample_size=(10, 100), repetitions=(10, 100));
"""
Explanation: We can then define a function to play sets of games and generate a list of win proportions for each set:
End of explanation
"""
# Play the game 10 times, recording the proportion of times switching wins.
# Repeat 100 times to record 100 proportions
proportions = generate_proportions(sample_size=10, repetitions=100)
def props_up_to(num_sets):
return proportions[:num_sets]
nbi.hist(props_up_to, num_sets=Play(min=0, max=100, value=0, interval=50))
"""
Explanation: Interacting with generate_proportions shows the relationship between its arguments sample_size and repetitions more quickly than reading the function itself!
Visualizing Proportions
We can then use nbi.hist to show these proportions being computed over runs.
Again, we pre-compute the simulations and interact with a function that takes a slice of the simulations to make the interaction faster.
End of explanation
"""
options = {
'title': 'Distribution of win proportion over 100 sets of 10 games when switching',
'xlabel': 'Proportions',
'ylabel': 'Percent per area',
'xlim': (0.3, 1),
'ylim': (0, 3),
'bins': 7,
}
nbi.hist(props_up_to, options=options, num_sets=Play(min=0, max=100, value=0, interval=50))
"""
Explanation: As with last time, it's illustrative to specify the limits of the axes:
End of explanation
"""
varying_sample_size = [generate_proportions(sample_size, repetitions=100)
for sample_size in range(10, 101)]
def props_for_sample_size(sample_size):
return varying_sample_size[sample_size - 10]
changed_options = {
'title': 'Distribution of win proportions as sample size increases',
'ylim': (0, 6),
'bins': 20,
}
nbi.hist(props_for_sample_size,
options={**options, **changed_options},
sample_size=Play(min=10, max=100, value=10, interval=50))
"""
Explanation: We can see that the distribution of wins is centered at roughly 0.66 but the distribution almost spans the entire x-axis. Will increasing the sample size make our distribution more narrow? Will increasing repetitions do the trick? Or both? We can find out through simulation and interaction.
We'll start with increasing the sample size:
End of explanation
"""
varying_reps = [generate_proportions(sample_size=10, repetitions=reps) for reps in range(10, 101)]
def props_for_reps(reps):
return varying_reps[reps - 10]
changed_options = {
'title': 'Distribution of win proportions as repetitions increase',
'ylim': (0, 5),
}
nbi.hist(props_for_reps,
options={**options, **changed_options},
reps=Play(min=10, max=100, value=10, interval=50))
"""
Explanation: So increasing the sample size makes the distribution narrower. We can now see more clearly that the distribution is centered at 0.66.
We can repeat the process for the number of repetitions:
End of explanation
"""
|
bicepjai/Puzzles
|
adventofcode/2017/.ipynb_checkpoints/day1_9-checkpoint.ipynb
|
bsd-3-clause
|
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
import numpy as np
import pandas as pd
import gc
import random
import smart_open
import h5py
import csv
import tensorflow as tf
import gensim
import string
import datetime as dt
from tqdm import tqdm_notebook as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
random_state_number = 967898
"""
Explanation: Setup
End of explanation
"""
! cat day1_input.txt
input_data = None
with open("day1_input.txt") as f:
input_data = f.read().strip().split()
input_data = [w.strip(",") for w in input_data ]
"""
Explanation: Code
Day 1: Inverse Captcha
The captcha requires you to review a sequence of digits (your puzzle input) and
find the sum of all digits that match the next digit in the list. The list is circular,
so the digit after the last digit is the first digit in the list.
End of explanation
"""
directions = {
("N","R") : ("E",0,1),
("N","L") : ("W",0,-1),
("W","R") : ("N",1,1),
("W","L") : ("S",1,-1),
("E","R") : ("S",1,-1),
("E","L") : ("N",1,1),
("S","R") : ("W",0,-1),
("S","L") : ("E",0,1)
}
def get_distance(data):
d,pos = "N",[0,0]
for code in data:
d1,v = code[0], int(code[1:])
d,i,m = directions[(d, code[0])]
pos[i] += m*v
#print(code,d,v,pos)
return sum([abs(n) for n in pos])
data = ["R2", "R2", "R2"]
get_distance(data)
data = ["R5", "L5", "R5", "R3"]
get_distance(data)
get_distance(input_data)
"""
Explanation: We build the direction map explicitly, since the possible (heading, turn) combinations are finite.
End of explanation
"""
input_data = None
with open("day2_input.txt") as f:
input_data = f.read().strip().split()
def get_codes(data, keypad, keypad_max_size, start_index=(1,1), verbose=False):
r,c = start_index
digit = ""
for codes in data:
if verbose: print(" ",codes)
for code in codes:
if verbose: print(" before",r,c,keypad[r][c])
if code == 'R' and c+1 < keypad_max_size and keypad[r][c+1] is not None:
c += 1
elif code == 'L' and c-1 >= 0 and keypad[r][c-1] is not None:
c -= 1
elif code == 'U' and r-1 >= 0 and keypad[r-1][c] is not None:
r -= 1
elif code == 'D' and r+1 < keypad_max_size and keypad[r+1][c] is not None:
r += 1
if verbose: print(" after",code,r,c,keypad[r][c])
digit += str(keypad[r][c])
return digit
sample = ["ULL",
"RRDDD",
"LURDL",
"UUUUD"]
keypad = [[1,2,3],[4,5,6],[7,8,9]]
get_codes(sample, keypad, keypad_max_size=3)
keypad = [[1,2,3],[4,5,6],[7,8,9]]
get_codes(input_data, keypad, keypad_max_size=3)
"""
Explanation: Day 2: Bathroom Security
part1
You arrive at Easter Bunny Headquarters under cover of darkness. However, you left in such a rush that you forgot to use the bathroom! Fancy office buildings like this one usually have keypad locks on their bathrooms, so you search the front desk for the code.
"In order to improve security," the document you find says, "bathroom codes will no longer be written down. Instead, please memorize and follow the procedure below to access the bathrooms."
The document goes on to explain that each button to be pressed can be found by starting on the previous button and moving to adjacent buttons on the keypad: U moves up, D moves down, L moves left, and R moves right. Each line of instructions corresponds to one button, starting at the previous button (or, for the first line, the "5" button); press whatever button you're on at the end of each line. If a move doesn't lead to a button, ignore it.
You can't hold it much longer, so you decide to figure out the code as you walk to the bathroom. You picture a keypad like this:
1 2 3
4 5 6
7 8 9
Suppose your instructions are:
ULL
RRDDD
LURDL
UUUUD
You start at "5" and move up (to "2"), left (to "1"), and left (you can't, and stay on "1"), so the first button is 1.
Starting from the previous button ("1"), you move right twice (to "3") and then down three times (stopping at "9" after two moves and ignoring the third), ending up with 9.
Continuing from "9", you move left, up, right, down, and left, ending with 8.
Finally, you move up four times (stopping at "2"), then down once, ending with 5.
So, in this example, the bathroom code is 1985.
Your puzzle input is the instructions from the document you found at the front desk. What is the bathroom code?
End of explanation
"""
input_data = None
with open("day21_input.txt") as f:
input_data = f.read().strip().split()
keypad = [[None, None, 1, None, None],
[None, 2, 3, 4, None],
[ 5, 6, 7, 8, None],
[None, 'A', 'B', 'C', None],
[None, None, 'D', None, None]]
sample = ["ULL",
"RRDDD",
"LURDL",
"UUUUD"]
get_codes(sample, keypad, keypad_max_size=5, start_index=(2,0), verbose=False)
get_codes(input_data, keypad, keypad_max_size=5, start_index=(2,0), verbose=False)
"""
Explanation: part2
You finally arrive at the bathroom (it's a several minute walk from the lobby so visitors can behold the many fancy conference rooms and water coolers on this floor) and go to punch in the code. Much to your bladder's dismay, the keypad is not at all like you imagined it. Instead, you are confronted with the result of hundreds of man-hours of bathroom-keypad-design meetings:
1
2 3 4
5 6 7 8 9
A B C
D
You still start at "5" and stop when you're at an edge, but given the same instructions as above, the outcome is very different:
You start at "5" and don't move at all (up and left are both edges), ending at 5.
Continuing from "5", you move right twice and down three times (through "6", "7", "B", "D", "D"), ending at D.
Then, from "D", you move five more times (through "D", "B", "C", "C", "B"), ending at B.
Finally, after five more moves, you end at 3.
So, given the actual keypad layout, the code would be 5DB3.
Using the same instructions in your puzzle input, what is the correct bathroom code?
Although it hasn't changed, you can still get your puzzle input.
End of explanation
"""
input_data = None
with open("day3_input.txt") as f:
input_data = f.read().strip().split("\n")
input_data = [list(map(int, l.strip().split())) for l in input_data]
result = [ (sides[0]+sides[1] > sides[2]) and (sides[2]+sides[1] > sides[0]) and (sides[0]+sides[2] > sides[1]) for sides in input_data]
sum(result)
"""
Explanation: Day3 squares With Three Sides
part1
Now that you can think clearly, you move deeper into the labyrinth of hallways and office furniture that makes up this part of Easter Bunny HQ. This must be a graphic design department; the walls are covered in specifications for triangles.
Or are they?
The design document gives the side lengths of each triangle it describes, but... 5 10 25? Some of these aren't triangles. You can't help but mark the impossible ones.
In a valid triangle, the sum of any two sides must be larger than the remaining side. For example, the "triangle" given above is impossible, because 5 + 10 is not larger than 25.
In your puzzle input, how many of the listed triangles are possible?
End of explanation
"""
input_data = None
with open("day31_input.txt") as f:
input_data = f.read().strip().split("\n")
input_data = [list(map(int, l.strip().split())) for l in input_data]
input_data[:5]
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
single_list = [input_data[r][c] for c in [0,1,2] for r in range(len(input_data))]
result = [ (sides[0]+sides[1] > sides[2]) and (sides[2]+sides[1] > sides[0]) and (sides[0]+sides[2] > sides[1]) for sides in chunks(single_list, 3)]
sum(result)
"""
Explanation: part2
Now that you've helpfully marked up their design documents, it occurs to you that triangles are specified in groups of three vertically. Each set of three numbers in a column specifies a triangle. Rows are unrelated.
For example, given the following specification, numbers with the same hundreds digit would be part of the same triangle:
101 301 501
102 302 502
103 303 503
201 401 601
202 402 602
203 403 603
In your puzzle input, and instead reading by columns, how many of the listed triangles are possible?
End of explanation
"""
input_data = None
with open("day4_input.txt") as f:
input_data = f.read().strip().split("\n")
len(input_data), input_data[:5]
answer = 0
for code in input_data:
m = re.match(r'(.+)-(\d+)\[([a-z]*)\]', code)
code, sector, checksum = m.groups()
code = code.replace("-","")
counts = collections.Counter(code).most_common()
counts.sort(key=lambda k: (-k[1], k[0]))
if ''.join([ch for ch,_ in counts[:5]]) == checksum:
answer += int(sector)
answer
"""
Explanation: Day4
part1: Security Through Obscurity
Finally, you come across an information kiosk with a list of rooms. Of course, the list is encrypted and full of decoy data, but the instructions to decode the list are barely hidden nearby. Better remove the decoy data first.
Each room consists of an encrypted name (lowercase letters separated by dashes) followed by a dash, a sector ID, and a checksum in square brackets.
A room is real (not a decoy) if the checksum is the five most common letters in the encrypted name, in order, with ties broken by alphabetization. For example:
aaaaa-bbb-z-y-x-123[abxyz] is a real room because the most common letters are a (5), b (3), and then a tie between x, y, and z, which are listed alphabetically.
a-b-c-d-e-f-g-h-987[abcde] is a real room because although the letters are all tied (1 of each), the first five are listed alphabetically.
not-a-real-room-404[oarel] is a real room.
totally-real-room-200[decoy] is not.
Of the real rooms from the list above, the sum of their sector IDs is 1514.
What is the sum of the sector IDs of the real rooms?
End of explanation
"""
for code in input_data:
m = re.match(r'(.+)-(\d+)\[([a-z]*)\]', code)
code, sector, checksum = m.groups()
sector = int(sector)
code = code.replace("-","")
counts = collections.Counter(code).most_common()
counts.sort(key=lambda k: (-k[1], k[0]))
string_maps = string.ascii_lowercase
cipher_table = str.maketrans(string_maps, string_maps[sector%26:] + string_maps[:sector%26])
if ''.join([ch for ch,_ in counts[:5]]) == checksum:
if "north" in code.translate(cipher_table):
print(code.translate(cipher_table))
print("sector",sector)
"""
Explanation: part2
With all the decoy data out of the way, it's time to decrypt this list and get moving.
The room names are encrypted by a state-of-the-art shift cipher, which is nearly unbreakable without the right software. However, the information kiosk designers at Easter Bunny HQ were not expecting to deal with a master cryptographer like yourself.
To decrypt a room name, rotate each letter forward through the alphabet a number of times equal to the room's sector ID. A becomes B, B becomes C, Z becomes A, and so on. Dashes become spaces.
For example, the real name for qzmt-zixmtkozy-ivhz-343 is very encrypted name.
What is the sector ID of the room where North Pole objects are stored?
End of explanation
"""
|
iurilarosa/thesis
|
codici/Archiviati/prove TF/.ipynb_checkpoints/Prove TF-checkpoint.ipynb
|
gpl-3.0
|
#basic python
x = 35
y = x + 5
print(y)
#basic TF
import tensorflow as tf   # needed: tf is not imported earlier in this notebook
#x = tf.random_uniform([1, 2], -1.0, 1.0)
x = tf.constant(35, name = 'x')
y = tf.Variable(x+5, name = 'y')
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
print(sess.run(y))
# to write out the graph
#writer = tf.summary.FileWriter("output", sess.graph)
print(sess.run(y))
#writer.close
a = tf.add(1, 2,)
b = tf.multiply(a, 3)
c = tf.add(4, 5,)
d = tf.multiply(c, 6,)
e = tf.multiply(4, 5,)
f = tf.div(c, 6,)
g = tf.add(b, d)
h = tf.multiply(g, f)
primo = tf.constant(3, name = 'primo')
secondo = tf.constant(5, name = 'secondo')
somma1 = primo + secondo
somma2 = tf.add(primo, secondo)
sess = tf.Session()
#writer = tf.summary.FileWriter("output", sess.graph)
print(sess.run(h))
%time print(sess.run(somma1))
%time print(sess.run(somma2))
#writer.close
# Creates a graph.
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
"""
Explanation: http://localhost:8888/notebooks/Documenti/TESI/thesis/codici/Prove%20TF.ipynb#Prova-cose-base
http://localhost:8888/notebooks/Documenti/TESI/thesis/codici/Prove%20TF.ipynb#Prova-arrays
http://localhost:8888/notebooks/Documenti/TESI/thesis/codici/Prove%20TF.ipynb#Riproduco-cose-fatte-con-numpy
http://localhost:8888/notebooks/Documenti/TESI/thesis/codici/Prove%20TF.ipynb#Prove-assegnazione-e-indexing
http://localhost:8888/notebooks/Documenti/TESI/thesis/codici/Prove%20TF.ipynb#Prova-convolution
Basic tests (Prova cose base)
End of explanation
"""
primo = tf.constant([[10,20,30], [100,200,300]], name = 'primo')
righe = tf.constant([1,2,3], name = 'secondo1')
colonne = tf.constant([[1],[2]], name = 'secondo2')
somma1 = primo + righe
somma2 = tf.add(primo, colonne)
sessione = tf.Session()
#writer = tf.summary.FileWriter("output", sess.graph)
print(sessione.run(somma1))
print(sessione.run(somma2))
print("dimensioni dei tre tensori")
print(primo.shape,
righe.shape,
colonne.shape)
print(primo)
# First, load the image
from matplotlib import image, pyplot   # needed for image.imread and the plots below
filename = "MarshOrchid.jpg"
img = tf.constant(image.imread(filename))
# Print out its shape
sessione = tf.Session()
numpimg = sessione.run(img)
pyplot.imshow(numpimg)
pyplot.show()
print(numpimg.size)
# the image is returned as an array (not clear whether it is numpy or plain python)
alterazione = tf.constant([5,5,0], name='blur')
tensoreImg = tf.Variable(img+alterazione, name='x')
#print(tensoreImg)
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
sess.run(tensoreImg)
img = tensoreImg.eval(session = sess)
img = img.astype(float)
#print(img)
pyplot.imshow(img)
#pyplot.show()
"""
Explanation: Array tests (Prova arrays)
End of explanation
"""
# I have not fully understood what Variable is
#unitensor = tf.Variable(tf.ones((10,10)))
unitensor = tf.Variable(tf.ones((10,10)))
unitensor2 = tf.ones((10,10))
unitensor3 = tf.constant(1, shape=(10,10))
tritensor = tf.constant(3, shape=(10,10))
tritensor2 = tf.Variable(unitensor3*3)
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
#print(sessione.run(unitensor))
#print(sessione.run(unitensor2))
print(sessione.run(unitensor3))
print(sessione.run(tritensor))
print(sessione.run(tritensor2))
"""
Explanation: Reproducing things done with numpy (Riproduco cose fatte con numpy)
Constant matrix initializations
End of explanation
"""
rangetensor = tf.range(0, limit = 9, delta = 1)
rangeMatrTensor = tf.reshape(rangetensor, (3,3))
#transposeTensor = tf.transpose(rangetensor, perm=[1])
reshapeTensor = tf.reshape(rangetensor,(9,1))
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
print(sessione.run(rangetensor))
print(sessione.run(rangeMatrTensor))
#print(sessione.run(transposeTensor))
print(sessione.run(reshapeTensor))
print(sessione.run(tf.ones(10, dtype = tf.int32)))
"""
Explanation: Range initialization and reshaping
End of explanation
"""
#tf.add #additions
#tf.subtract #subtractions
#tf.multiply #multiplications
#tf.scalar_mul(scalar, tensor) #multiplies the whole tensor by a scalar
#tf.div #divisions WARNING: tf.divide MAY BE DIFFERENT!
#tf.truediv #divisions, always returning floats
unitensor = tf.ones((3,3))
duitensor = tf.constant(2.0, shape=(3,3))
sommatensor1 = tf.add(unitensor,duitensor)
sommatensor2 = tf.Variable(unitensor+duitensor)
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
print(sessione.run(sommatensor1))
print(sessione.run(sommatensor2))
rangetensor = tf.range(0.0, limit = 9, delta = 1)
rangetensor = tf.reshape(rangetensor, (3,3))
prodottotensor1 = tf.multiply(rangetensor,duitensor)
# with a Variable, it is not exactly the same thing
prodottotensor2 = tf.Variable(rangetensor*duitensor)
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
print(sessione.run(prodottotensor1))
print(sessione.run(prodottotensor2))
# the + and * operators work elementwise as in numpy, but * can also act as a dot product
"""
Explanation: Elementwise matrix operations (sum, product)
End of explanation
"""
# take the outer product of two vectors to get a 2d matrix, then hack it into a 3d array
prodotto1 = tf.Variable(rangetensor*reshapeTensor)
#prodotto1 = tf.reshape(prodotto1, (81,1))
prodotto1bis = tf.multiply(rangetensor,reshapeTensor)
prodotto2 = tf.Variable(reshapeTensor*rangetensor)
prodotto2bis = tf.multiply(reshapeTensor, rangetensor)
#prodotto3d = tf.multiply(rangetensor,prodotto1) # what output does this command give?
#prodotto3d = tf.multiply(prodotto1, reshapeTensor) # it is commutative and gives the same result whether the vector
# is vertical or horizontal!
#prodotto3d = tf.multiply(rangetensor, prodotto1)
#prodotto3d = tf.reshape(prodotto3d, (9,9,9))
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
def outer3d(vettore, matrice):
shape = tf.shape(matrice)
matrice = tf.reshape(matrice, (tf.size(matrice),1))
prodotto3d = tf.multiply(vettore, matrice)
return tf.reshape(prodotto3d, (shape[0],shape[1],tf.size(vettore)))
prodottoFunzione = outer3d(rangetensor,prodotto1)
#print(sessione.run(prodotto3d))
print(sessione.run(prodottoFunzione))
# matrix products
unitensor = tf.ones((3,3))
rangetensor = tf.range(0.0, limit = 9)
rangetensor = tf.reshape(rangetensor, (3,3))
tensorMatrProd = tf.matmul(rangetensor, rangetensor)
tensorProd = tf.tensordot(rangetensor,rangetensor, 1)
# they are equivalent; doing tensordot with axes=2 yields
# a scalar that I do not understand
sessione = tf.Session()
print(sessione.run(tensorMatrProd))
print(sessione.run(tensorProd))
print(sessione.run(tf.transpose(tensorProd)))
#tf.transpose #transpose
#tf.reshape(rangetensor,(10,1)) #transposed (column) vector
#tf.matrix_transpose #transposes the last two dimensions of a tensor of rank >= 2
#tf.matrix_inverse #inverse of a square, invertible matrix
tensoruni = tf.ones(10.0)
tensorzeri = tf.zeros(10.0)
tensorscala = tf.range(10.0)
colonne = tf.constant(10)
#dot product
tensorScalar = tf.tensordot(tensoruni,tensorscala, 1)
#transposed vector
tensorTrasposto = tf.reshape(tensorscala,(10,1))
#outer product: NB TensorFlow broadcasts automatically
tensorOuter = tensoruni*tensorTrasposto
sessione = tf.Session()
print(sessione.run(tensoruni), sessione.run(tensorzeri))
print(sessione.run(tf.zeros([colonne])))
print(sessione.run(tensorScalar))
print(sessione.run(tensorscala))
print(sessione.run(tensorTrasposto))
print(sessione.run(tensorOuter))
"""
Explanation: Other math operations:
https://www.tensorflow.org/api_guides/python/math_ops#Arithmetic_Operators
https://www.tensorflow.org/api_guides/python/math_ops#Basic_Math_Functions
Matrix operations: outer product, Kronecker product, row-by-column product, inverse, transpose
End of explanation
"""
array = tf.Variable(tf.range(10,20))
indici = tf.constant([1,3,5])
updati = tf.constant([100,90,4050])
slicearray = tf.gather(array,indici)
updarray = tf.scatter_update(array,indici,updati)
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
print(sessione.run(array[0:4]))
print(sessione.run(slicearray))
print(sessione.run(array))
print(sessione.run(updarray))
# selecting nonzero elements
#vettore = tf.constant([1,0,0,2,0], dtype=tf.int64)
ravettore = tf.random_uniform((1,100000000),0,2,dtype = tf.int32)
ravettore = ravettore[0]
where = tf.not_equal(ravettore, 0)
indici = tf.where(where)
nonzeri = tf.gather(ravettore,indici)
#OR
#sparso = tf.SparseTensor(indici, nonzeri, dense_shape=vettore.get_shape())
sessione = tf.Session(config=tf.ConfigProto(log_device_placement=True))
%time sessione.run(nonzeri)
#print(shape,sessione.run(shape))
#print(sessione.run(ravettore))
#%time print(sessione.run(indici))
#%time print(sessione.run(nonzeri))
#%time sessione.run(ravettore)
#%time sessione.run(indici)
#print(sessione.run(sparso))
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
nomi = get_available_gpus()
print(nomi)
# map test
sessione = tf.Session()
moltiplicatore = tf.range(10)
addizionatore = tf.range(100,110)
def mappalo(stepIesimo):
uni = tf.range(10)
moltiplicato = tf.multiply(moltiplicatore[stepIesimo],uni)
addizionato = moltiplicato + addizionatore
return addizionato
image = tf.map_fn(mappalo, tf.range(0, 10), dtype=tf.int32)
print(sessione.run(image))
# map test with a matrix product
import numpy
from scipy import sparse
from matplotlib import pyplot
import tensorflow as tf
from tensorflow.python.client import timeline
import time
nRows = 10
def mapfunc(ithStep):
matrix1 = tf.zeros([1000,1000], dtype = tf.float32)
matrix2 = tf.ones([1000,1000], dtype = tf.float32)
matrix1 = tf.add(matrix1,ithStep)
prodotto = tf.matmul(matrix1,matrix2)
return prodotto
sessione = tf.Session(config=tf.ConfigProto(log_device_placement=True))
imageMapped = tf.map_fn(mapfunc, tf.range(0,nRows), dtype = tf.float32)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
start = time.time()
image = sessione.run(imageMapped, options=run_options, run_metadata=run_metadata)
stop = time.time()
print(stop-start)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('timelineDB.json', 'w') as f:
f.write(ctf)
# matrix product test
import numpy
import tensorflow as tf
from tensorflow.python.client import timeline
matrix1 = tf.zeros([5000,5000], dtype = tf.int32)
matrix2 = tf.ones([5000,5000], dtype = tf.int32)
matrix1 = tf.add(matrix1,2)
product = tf.matmul(matrix1,matrix2)
session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
image = session.run(product, options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('timelineDB.json', 'w') as f:
f.write(ctf)
#prova histogram fixed
import numpy
import tensorflow as tf
from tensorflow.python.client import timeline
matrix1 = tf.random_uniform((5000,5000),0,2,dtype = tf.int32)
matrix2 = tf.ones([5000,5000], dtype = tf.int32)
matrix1 = tf.add(matrix1,2)
product = tf.matmul(matrix1,matrix2)
session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
image = session.run(product, options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('timelineDB.json', 'w') as f:
f.write(ctf)
"""
Explanation: All the matrix operations:
https://www.tensorflow.org/api_guides/python/math_ops#Matrix_Math_Functions
https://www.tensorflow.org/api_docs/python/tf/tensordot (product by contraction over one index)
Assignment and indexing tests (Prove assegnazione e indexing)
End of explanation
"""
import numpy
import tensorflow as tf
sessione = tf.Session()
array = tf.range(0.0,100.0)
cosaImportante1 = tf.range(0.0,2.0)
cosaImportante2 = tf.constant([2.0])
tutto = tf.concat((cosaImportante1, cosaImportante2, array),0)
def funsione(i):
j = i+3
funsionalo = tutto[2] + tutto[j]
return funsionalo
mappa = tf.map_fn(funsione, tf.range(0,tf.size(tutto)-3), dtype=tf.float32)
print(sessione.run(tf.size(mappa)))
"""
Explanation: Map test passing in several variables
End of explanation
"""
import numpy
import tensorflow as tf
sessione = tf.Session()
array = tf.range(0.0,8160000.0)
array = tf.reshape(array, (85,96000))
kernel = tf.constant([[-1.0,0.0,0.0,1.0]])
array = tf.reshape(array,(1,85,96000,1))
kernel = tf.reshape(kernel, (1,4,1,1))
somma = tf.nn.conv2d(input=array,filter=kernel,strides=[1,1,1,1],padding ='SAME')
somma = tf.reshape(somma, (85,96000))
#somma = tf.reshape(somma, (85,95997))
#print(sessione.run(kernel))
#print(sessione.run(array))
print(sessione.run(somma))
array = tf.range(0.0,8160000.0)
array = tf.reshape(array, (85,96000))
larghezza = tf.constant(3)
colonne = tf.size(array[0])
#houghInt = houghDiff[:,semiLarghezza*2:nColumns]-houghDiff[:,0:nColumns - semiLarghezza*2]
#houghInt = tf.concat([houghDiff[:,0:semiLarghezza*2],houghInt],1)
arrayInt = array[:,larghezza:colonne]-array[:,0:colonne-larghezza]
print(sessione.run(arrayInt))
print(sessione.run(tf.shape(arrayInt)))
enhancement = 10
kernel = tf.concat(([-1.0],tf.zeros(enhancement,dtype=tf.float32),[1.0]),0)
print(sessione.run(kernel))
"""
Explanation: Convolution test (Prova convolution)
End of explanation
"""
import numpy
import tensorflow as tf
sess = tf.Session()
array = numpy.array([0.1, 0.2, 0.4, 0.8, 0.9, 1.1]).astype(numpy.float32)
print(array.tobytes())
print(numpy.fromstring(array.tobytes()))
tensoraw = tf.constant(array.tobytes())
print(sess.run(tensoraw))
print(sess.run(tf.decode_raw(tensoraw, tf.float32)))
rawArray = sess.run(tensoraw)
decodedArray = sess.run(tf.decode_raw(tensoraw, tf.float32))
print(numpy.fromstring(rawArray))
print(numpy.fromstring(decodedArray))
"""
Explanation: ## Raw-to-float tests and vice versa
End of explanation
"""
# general formulation of the outer product of two vectors and of the column-vector/matrix outer product
matrice = tf.reshape(tf.range(0,50), (10,5))
vettore = tf.range(0,4)
vettore1 = tf.range(1,6)
vettore2 = tf.range(100,106)
shape = tf.shape(matrice)
matrice = tf.reshape(matrice, (1, tf.size(matrice)))
vettore = tf.reshape(vettore, (tf.size(vettore),1))
prodotto3d = tf.multiply(vettore, matrice)
prodotto3d = tf.reshape(prodotto3d, (tf.size(vettore), shape[1],shape[0]))
vettore2 = tf.reshape(vettore2, (tf.size(vettore2),1))
prodottoX = tf.multiply(vettore1,vettore2)
sessione = tf.Session()
print(sessione.run(prodottoX))
#print(sessione.run(prodotto3d))
#print(sessione.run(shape))
# some other tests on sums
vettore = tf.range(0,4)
vettore2 = tf.range(10,14)
sommaVettori = tf.add(vettore,vettore2)
vettoreSomma = vettore +2
vettoreSomma2 = tf.add(vettore,2)
vettoreSomma3 = tf.Variable(vettore+2)
init = tf.global_variables_initializer()
sessione = tf.Session()
sessione.run(init)
print(vettore, vettoreSomma, vettoreSomma2, vettoreSomma3)
print(sessione.run((vettore, vettoreSomma,vettoreSomma2,vettoreSomma3)))
print(sessione.run(sommaVettori))
# stack test
vettore = tf.range(0,4)
vettore2 = tf.range(10,14)
vettore = tf.reshape(vettore,(1,4))
vettore2 = tf.reshape(vettore2,(1,4))
staccato = tf.stack([vettore[0],vettore2[0]])
sessione = tf.Session()
print(sessione.run(staccato))
# test: summing elements with the same coordinates in a sparse matrix
indices = tf.constant([[1, 1], [1, 2], [1, 2], [1, 6]])
values = tf.constant([1, 2, 3, 4])
# Linearize the indices. If the dimensions of original array are
# [N_{k}, N_{k-1}, ... N_0], then simply matrix multiply the indices
# by [..., N_1 * N_0, N_0, 1]^T. For example, if the sparse tensor
# has dimensions [10, 6, 4, 5], then multiply by [120, 20, 5, 1]^T
# In your case, the dimensions are [10, 10], so multiply by [10, 1]^T
linearized = tf.matmul(indices, [[10], [1]])
# Get the unique indices, and their positions in the array
y, idx = tf.unique(tf.squeeze(linearized))
# Use the positions of the unique values as the segment ids to
# get the unique values
values = tf.segment_sum(values, idx)
# Go back to N-D indices
y = tf.expand_dims(y, 1)
righe = tf.cast(y/10,tf.int32)
colonne = y%10
indices = tf.concat([righe, colonne],1)
tf.InteractiveSession()
print(indices.eval())
print(values.eval())
print(linearized.eval())
print(sessione.run((righe,colonne)))
# here I try a fully vectorial version
sessione = tf.Session()
matrix = tf.random_uniform((10,10), 0,2, dtype= tf.int32)
coordinates = tf.where(tf.not_equal(matrix,0))
x = coordinates[:,0]
x = tf.cast(x, tf.float32)
times = coordinates[:,1]
times = tf.cast(times, tf.float32)
xSize = tf.shape(x)[0]
weights = tf.random_uniform((1,xSize),0,1,dtype = tf.float32)
nStepsY=5.0
y = tf.range(1.0,nStepsY+1)
#y = tf.reshape(y,(tf.size(y),1))
nRows = 5
nColumns = 80
image = tf.zeros((nRows, nColumns))
y = tf.reshape(y, (tf.size(y),1))
print(y[0])
yTimed = tf.multiply(y,times)
appoggio = tf.ones([nRows])
appoggio = tf.reshape(appoggio, (tf.size(appoggio),1))
#print(sessione.run(tf.shape(appoggio)))
#print(sessione.run(tf.shape(x)))
x3d = tf.multiply(appoggio,x)
weights3d = tf.multiply(appoggio,weights)
positions = tf.round(x3d-yTimed)
positions = tf.add(positions,50)
positions = tf.cast(positions, dtype=tf.int64)
riappoggio = tf.ones([xSize], dtype = tf.int64)
y = tf.cast(y, tf.int64)
y3d = tf.multiply(y, riappoggio)
y3d = tf.reshape(y3d, (1,tf.size(y3d)))
weights3d = tf.reshape(weights3d, (1,tf.size(weights3d)))
positions = tf.reshape(positions, (1,tf.size(positions)))
righe = y3d[0]
colonne = positions[0]
pesi = weights3d[0]
#CONSIDER USING PARALLEL STACK
coordinate = tf.stack([righe,colonne],1)
shape = [6,80]
matrice = tf.SparseTensor(coordinate, pesi, shape)
matrice = tf.sparse_reorder(matrice)
coordinate = tf.cast(matrice.indices, tf.int32)
linearized = tf.matmul(coordinate, [[100], [1]])
coo, idx = tf.unique(tf.squeeze(linearized))
values = tf.segment_sum(matrice.values, idx)
# Go back to N-D indices
coo = tf.expand_dims(coo, 1)
indices = tf.concat([tf.cast(coo/100,tf.int32), coo%100],1)
#print(sessione.run((indices)))
#matrice = tf.SparseTensor(indices, pesi, shape)
immagine = tf.sparse_to_dense(indices, shape, values)
#print(sessione.run((tf.shape(coordinate), tf.shape(pesi), tf.shape(shape))))
#print(sessione.run((tf.shape(x3d), tf.shape(y3d),tf.shape(positions))))
#print(sessione.run(indices))
plottala = sessione.run(immagine)
%matplotlib inline
a = pyplot.imshow(plottala, aspect = 10)
#pyplot.show()
# here I try it with map
sessione = tf.Session()
matrix = tf.random_uniform((10,10), 0,2, dtype= tf.int32)
coordinates = tf.where(tf.not_equal(matrix,0))
x = coordinates[:,0]
x = tf.cast(x, tf.float32)
times = coordinates[:,1]
times = tf.cast(times, tf.float32)
xSize = tf.shape(x)[0]
weights = tf.random_uniform((1,xSize),0,1,dtype = tf.float32)
nStepsY=5.0
y = tf.range(1.0,nStepsY+1)
#y = tf.reshape(y,(tf.size(y),1))
nRows = 5
nColumns = 80
weights = tf.reshape(weights, (1,tf.size(weights)))
pesi = weights[0]
def funmap(stepIesimo):
yTimed = tf.multiply(y[stepIesimo],times)
positions = tf.round(x-yTimed)
positions = tf.add(positions,50)
positions = tf.cast(positions, dtype=tf.int64)
positions = tf.reshape(positions, (1,tf.size(positions)))
riga= tf.ones([tf.size(x)])
riga = tf.reshape(riga, (1,tf.size(riga)))
righe = riga[0]
colonne = positions[0]
coordinate = tf.stack([tf.cast(righe,dtype=tf.int64),tf.cast(colonne,dtype=tf.int64)],1)
shape = [1,80]
matrice = tf.SparseTensor(coordinate, pesi, shape)
#matrice = tf.sparse_reorder(matrice)
coordinate = tf.cast(matrice.indices, tf.int32)
coo, idx = tf.unique(coordinate[:,1])
values = tf.segment_sum(matrice.values, idx)
immagine = tf.sparse_to_dense(coo, [nColumns], values)
#immagine = tf.cast(immagine, dtype=tf.float32)
return immagine
hough = tf.map_fn(funmap, tf.range(0,5),dtype=tf.float32)
plottala = sessione.run(hough)
print(numpy.size(plottala))
#imm = [plottala,plottala]
%matplotlib inline
a = pyplot.imshow(plottala, aspect = 10)
pyplot.show()
# here I try with tf map or scan (with bincount)
sessione = tf.Session()
matrix = tf.random_uniform((10,10), 0,2, dtype= tf.int32)
coordinates = tf.where(tf.not_equal(matrix,0))
x = coordinates[:,0]
x = tf.cast(x, tf.float32)
times = coordinates[:,1]
times = tf.cast(times, tf.float32)
xSize = tf.shape(x)[0]
weights = tf.random_uniform((1,xSize),0,1,dtype = tf.float32)
nStepsY=5.0
y = tf.range(1.0,nStepsY+1)
#y = tf.reshape(y,(tf.size(y),1))
nRows = 5
nColumns = 80
y = tf.reshape(y, (tf.size(y),1))
def mapIt(ithStep):
    yTimed = y[ithStep]*times
    positions = tf.cast(tf.round(x-yTimed+50), tf.int32)
    # bincount with weights accumulates the weight of every point falling in each column
    image = tf.bincount(positions, weights=weights[0],
                        minlength=nColumns, maxlength=nColumns)
    return image
%time imageMapped = sessione.run([mapIt(i) for i in range(int(nStepsY))])
imageMapped = numpy.array(imageMapped)
%matplotlib inline
a = pyplot.imshow(imageMapped, aspect = 10)
import scipy.io
import numpy
percorsoFile = "/home/protoss/matlabbo.mat"
#percorsoFile = "matlabbo/miaimgSenzacumsum.mat"
scalareMatlabbo = scipy.io.loadmat(percorsoFile)['scalarevero']
scalareMatlabbo
ncolumn = tf.constant(10)
matrice = tf.zeros((0,ncolumn))
print(sessione.run(matrice))
import tensorflow as tf
matricia = tf.random_uniform((9,),0,1,dtype = tf.float32)
"""
Explanation: Useful documentation
TF lessons
Part on placeholders, possibly useful for the Hough transform
Official docs
Useful things in the official docs
Official guides
Constant tensors (generalization of numpy zeros/ones)
Very interesting, a generalization of the matrix transpose
Sparse tensors
Fourier
Broadcasting IMPORTANT
useful or important things
https://stackoverflow.com/questions/39219414/in-tensorflow-how-can-i-get-nonzero-values-and-their-indices-from-a-tensor-with
https://www.google.it/search?client=ubuntu&channel=fs&q=tf+scatter+update&ie=utf-8&oe=utf-8&gfe_rd=cr&ei=JkYvWduzO-nv8AfympmoAQ
https://stackoverflow.com/questions/34685947/adjust-single-value-within-tensor-tensorflow
https://www.tensorflow.org/versions/r0.11/api_docs/python/state_ops/sparse_variable_updates
https://www.tensorflow.org/api_docs/python/tf/scatter_add
https://www.tensorflow.org/api_docs/python/tf/scatter_update
https://stackoverflow.com/questions/34935464/update-a-subset-of-weights-in-tensorflow
https://stackoverflow.com/questions/39859516/how-to-update-a-subset-of-2d-tensor-in-tensorflow
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/how_google_does_ml/solutions/automl-tabular-classification.ipynb
|
apache-2.0
|
# Setup your dependencies
import os
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if IS_GOOGLE_CLOUD_NOTEBOOK:
USER_FLAG = "--user"
"""
Explanation: Vertex AI Model Builder SDK: AutoML Tabular Training and Prediction
Overview
In this notebook, you learn how to use the Vertex AI Python client library to train and deploy a tabular classification model for online prediction.
Learning Objective
In this notebook, you learn how to:
Create a Vertex AI model training job.
Train an AutoML tabular model.
Deploy the model resource to a serving endpoint resource.
Make a prediction by sending data.
Undeploy the model resource.
Introduction
In this notebook, you will use the Vertex AI Python client library to train and make predictions on an AutoML model based on a tabular dataset. Alternatively, you can train and make predictions on models by using the gcloud command-line tool or by using the online Cloud Console.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
Make sure to enable the Vertex AI API and Compute Engine API.
Installation
End of explanation
"""
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
"""
Explanation: Install the latest version of the Vertex AI client library.
Run the following command in your virtual environment to install the Vertex SDK for Python:
End of explanation
"""
# Upgrade the specified package to the newest available version
! pip install {USER_FLAG} --upgrade google-cloud-storage
"""
Explanation: Install the Cloud Storage library:
End of explanation
"""
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
"""
Explanation: Set your project ID
If you don't know your project ID, you may be able to get your project ID using gcloud.
End of explanation
"""
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
"""
Explanation: Otherwise, set your project ID here.
End of explanation
"""
# Import necessary libraries
from datetime import datetime
# Use a timestamp to ensure unique resources
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
End of explanation
"""
BUCKET_NAME = "gs://qwiklabs-gcp-04-c846b6079446" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://qwiklabs-gcp-04-c846b6079446":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
This notebook demonstrates how to use the Model Builder SDK to create an AutoML model based on a tabular dataset. You will need to provide a Cloud Storage bucket where the dataset will be stored.
Set the name of your Cloud Storage bucket below. It must be unique across all of your Cloud Storage buckets.
You may also change the REGION variable, which is used for operations throughout the rest of this notebook. Make sure to choose a region where Vertex AI services are available. You may not use a Multi-Regional Storage bucket for training with Vertex AI.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_NAME
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_NAME
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
IMPORT_FILE = "petfinder-tabular-classification_toy.csv"
! gsutil cp gs://cloud-training/mlongcp/v3.0_MLonGC/toy_data/{IMPORT_FILE} {BUCKET_NAME}/data/
gcs_source = f"{BUCKET_NAME}/data/{IMPORT_FILE}"
"""
Explanation: Copy dataset into your Cloud Storage bucket
End of explanation
"""
# Import necessary libraries
import os
from google.cloud import aiplatform
aiplatform.init(project=PROJECT_ID, location=REGION)
"""
Explanation: Import Vertex SDK for Python
Import the Vertex SDK into your Python environment and initialize it.
End of explanation
"""
ds = dataset = aiplatform.TabularDataset.create(
display_name="petfinder-tabular-dataset",
gcs_source=gcs_source,
)
ds.resource_name
"""
Explanation: Tutorial
Now you are ready to create your AutoML Tabular model.
Create a Managed Tabular Dataset from a CSV
This section will create a dataset from a CSV file stored on your GCS bucket.
End of explanation
"""
# TODO 1
# Constructs a AutoML Tabular Training Job
job = aiplatform.AutoMLTabularTrainingJob(
display_name="train-petfinder-automl-1",
optimization_prediction_type="classification",
column_transformations=[
{"categorical": {"column_name": "Type"}},
{"numeric": {"column_name": "Age"}},
{"categorical": {"column_name": "Breed1"}},
{"categorical": {"column_name": "Color1"}},
{"categorical": {"column_name": "Color2"}},
{"categorical": {"column_name": "MaturitySize"}},
{"categorical": {"column_name": "FurLength"}},
{"categorical": {"column_name": "Vaccinated"}},
{"categorical": {"column_name": "Sterilized"}},
{"categorical": {"column_name": "Health"}},
{"numeric": {"column_name": "Fee"}},
{"numeric": {"column_name": "PhotoAmt"}},
],
)
# TODO 2a
# Create and train the model object
# This will take around two and a half hours to run
model = job.run(
dataset=ds,
target_column="Adopted",
# TODO 2b
# Define training, validation and test fraction for training
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
model_display_name="adopted-prediction-model",
disable_early_stopping=False,
)
"""
Explanation: Launch a Training Job to Create a Model
Once the training job is defined, we create and train the model. The run function creates a training pipeline that trains and creates a Model object. After the training pipeline completes, the run function returns the Model object.
NOTE: It takes nearly 2 hours 15 minutes to complete the training. Please wait until the training is complete.
End of explanation
"""
# TODO 3
# Deploy the model resource to the serving endpoint resource
endpoint = model.deploy(
machine_type="n1-standard-4",
)
"""
Explanation: Deploy your model
Before you use your model to make predictions, you need to deploy it to an Endpoint. You can do this by calling the deploy function on the Model resource. This function does two things:
Creates an Endpoint resource to which the Model resource will be deployed.
Deploys the Model resource to the Endpoint resource.
Deploy your model.
NOTE: Wait until the model FINISHES deployment before proceeding to prediction.
End of explanation
"""
# TODO 4
# Make a prediction using the sample values
prediction = endpoint.predict(
[
{
"Type": "Cat",
"Age": "3",
"Breed1": "Tabby",
"Gender": "Male",
"Color1": "Black",
"Color2": "White",
"MaturitySize": "Small",
"FurLength": "Short",
"Vaccinated": "No",
"Sterilized": "No",
"Health": "Healthy",
"Fee": "100",
"PhotoAmt": "2",
}
]
)
print(prediction)
"""
Explanation: Predict on the endpoint
This sample instance is taken from an observation in which Adopted = Yes
Note that the values are all strings. Since the original data was in CSV format, everything is treated as a string. The transformations you defined when creating your AutoMLTabularTrainingJob inform Vertex AI to transform the inputs to their defined types.
End of explanation
"""
# TODO 5
# Undeploy the model resource
endpoint.undeploy(deployed_model_id=prediction.deployed_model_id)
"""
Explanation: Undeploy the model
To undeploy your Model resource from the serving Endpoint resource, use the endpoint's undeploy method with the following parameter:
deployed_model_id: The model deployment identifier returned by the prediction service when the Model resource is deployed. You can retrieve the deployed_model_id using the prediction object's deployed_model_id property.
End of explanation
"""
delete_training_job = True
delete_model = True
delete_endpoint = True
# Warning: Setting this to true will delete everything in your bucket
delete_bucket = False
# Delete the training job
job.delete()
# Delete the model
model.delete()
# Delete the endpoint
endpoint.delete()
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil -m rm -r $BUCKET_NAME
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Training Job
Model
Endpoint
Cloud Storage Bucket
Note: You must delete any Model resources deployed to the Endpoint resource before deleting the Endpoint resource.
End of explanation
"""
|
planet-os/notebooks
|
api-examples/CFSv2_winter_forecast.ipynb
|
mit
|
%matplotlib notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import calendar
import datetime
import matplotlib.dates as mdates
from API_client.python.datahub import datahub_main
from API_client.python.lib.dataset import dataset
from API_client.python.lib.variables import variables
import matplotlib
import warnings
warnings.filterwarnings("ignore")
matplotlib.rcParams['font.family'] = 'Avenir Lt Std'
print (matplotlib.__version__)
"""
Explanation: Using a CFSv2 forecast
CFSv2 is a seasonal forecast system, used for analysing past climate and also making seasonal, up to 9-month, forecasts. Here we give a brief example on how to use Planet OS API to merge 9-month forecasts started at different initial times, into a single ensemble forecast.
Ensemble forecasting is a traditional technique in medium range (up to 10 days) weather forecasts, seasonal forecasts and climate modelling. By changing initial conditions or model parameters, a range of forecasts is created, which differ from each other slightly, due to the chaotic nature of fluid dynamics (which weather modelling is a subset of). For weather forecasting, the ensemble is usually created by small changes in initial conditions, but for seasonal forecast, it is much easier to just take real initial conditions every 6-hours. Here we are going to show, first how to merge the different dates into a single plot with the help of python pandas library, and in addition we show that even 6-hour changes in initial conditions can lead to large variability in long range forecasts.
In this example we look at the 2 m temperature for the upcoming winter. We are also adding climatological averages from CFS Reanalysis Climatologies to the plot for a better overview.
If you have more interest in Planet OS API, please refer to our official documentation.
Please also note that the API_client python routine used in this notebook is still experimental and will change in the future, so take it just as guidance for using the API, and not as an official tool.
Note that we store 10 days of history on this dataset, so this notebook will be updated with the latest data as well, which means that the descriptions here might be a little outdated as the data is renewed.
End of explanation
"""
apikey = open('APIKEY').readlines()[0].strip()
dh = datahub_main(apikey)
ds = dataset('ncep_cfsv2', dh, debug=False)
ds2 = dataset('ncep_cfsr_climatologies', dh, debug=False)
ds.variables()
ds.vars = variables(ds.variables(), {'reftimes':ds.reftimes,'timesteps':ds.timesteps},ds)
ds2.vars = variables(ds2.variables(), {},ds2)
"""
Explanation: The API needs a file APIKEY with your API key in the work folder. We initialize a datahub and dataset objects.
End of explanation
"""
start_date = datetime.datetime.now() - datetime.timedelta(days=9)
end_date = datetime.datetime.now() + datetime.timedelta(days=5)
reftime_start = start_date.strftime('%Y-%m-%d') + 'T00:00:00'
reftime_end = end_date.strftime('%Y-%m-%d') + 'T18:00:00'
locations = ['Tallinn','Innsbruck','Paris','Berlin','Lisbon']
for locat in locations:
ds2.vars.TMAX_2maboveground.get_values_analysis(count=1000, location=locat)
ds.vars.Temperature_height_above_ground.get_values(count=1000, location=locat, reftime=reftime_start,
reftime_end=reftime_end)
"""
Explanation: At the moment we are going to look at temperatures in Tallinn, Innsbruck, Paris, Berlin and Lisbon. In order for the automatic location selection to work, add your custom location to the API_client.python.lib.predef_locations file and then add your location to the list of locations here.
End of explanation
"""
def clean_table(loc):
ddd_clim = ds2.vars.TMAX_2maboveground.values[loc][['time','TMAX_2maboveground']]
ddd_temp = ds.vars.Temperature_height_above_ground.values[loc][['reftime','time','Temperature_height_above_ground']]
dd_temp=ddd_temp.set_index('time')
return ddd_clim,dd_temp
"""
Explanation: Here we clean the table up a bit and create a time-based index.
End of explanation
"""
def resample_1month_totals(loc):
reft_unique = ds.vars.Temperature_height_above_ground.values[loc]['reftime'].unique()
nf_tmp = []
for reft in reft_unique:
abc = dd_temp[dd_temp.reftime==reft].resample('M').mean()
abc['Temperature_height_above_ground'+'_'+reft.astype(str)] = \
        abc['Temperature_height_above_ground'] - 273.15  # convert from K to deg C
del abc['Temperature_height_above_ground']
nf_tmp.append(abc)
nf2_tmp = pd.concat(nf_tmp,axis=1)
return nf2_tmp
"""
Explanation: Next, we resample the data to monthly means and convert the values to degrees Celsius.
End of explanation
"""
def reindex_clim_convert_temp():
i_new = 0
ddd_clim_new_indxes = ddd_clim.copy()
new_indexes = []
converted_temp = []
for i,clim_values in enumerate(ddd_clim['TMAX_2maboveground']):
if i == 0:
i_new = 12 - nf2_tmp.index[0].month + 2
else:
i_new = i_new + 1
if i_new == 13:
i_new = 1
new_indexes.append(i_new)
converted_temp.append(ddd_clim_new_indxes['TMAX_2maboveground'][i] -273.15)
ddd_clim_new_indxes['new_index'] = new_indexes
ddd_clim_new_indxes['tmp_c'] = converted_temp
return ddd_clim_new_indxes
"""
Explanation: Give new indexes to the climatology dataframe so that the data is ordered in the same way as the CFSv2 forecast.
End of explanation
"""
def make_image(loc):
fig=plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
plt.ylim(np.min(np.min(nf2_tmp))-3,np.max(np.max(nf2_tmp))+3)
plt.boxplot(nf2_tmp,medianprops=dict(color='#1B9AA0'))
dates2 = [n.strftime('%b %Y') for n in nf2_tmp.index]
if len(np.arange(1, len(dates2)+1))== len(ddd_clim_indexed.sort_values(by=['new_index'])['tmp_c'][:-3]):
clim_len = -3
else:
clim_len = -2
plt.plot(np.arange(1, len(dates2)+1),ddd_clim_indexed.sort_values(by=['new_index'])['tmp_c'][:clim_len],"*",color='#EC5840',linestyle='-')
plt.xticks(np.arange(1, len(dates2)+1), dates2, rotation='vertical')
plt.grid(color='#C3C8CE',alpha=1)
plt.ylabel('Monthly Temperature [C]')
ttl = plt.title('Monthly Temperature in ' + loc,fontsize=15,fontweight='bold')
ttl.set_position([.5, 1.05])
fig.autofmt_xdate()
#plt.savefig('Monthly_mean_temp_cfsv2_forecast_{0}.png'.format(loc),dpi=300,bbox_inches='tight')
plt.show()
"""
Explanation: Finally, we are visualizing the monthly mean temperature for each different forecast, in a single plot.
End of explanation
"""
for locat in locations:
ddd_clim,dd_temp = clean_table(locat)
nf2_tmp = resample_1month_totals(locat)
ddd_clim_indexed = reindex_clim_convert_temp()
make_image(locat)
"""
Explanation: Below we can find the five location graphs. In the first graph, November is forecast to be quite similar to the climatological mean (red line), while December might be much warmer than usual. January is again pretty similar to the climatology. After January, all the months are forecast to be colder than average, especially April and May.
The forecast for Innsbruck is interesting: the upcoming seven months are all forecast to be colder than usual. For example, the January mean temperature might be two degrees lower.
At the same time, Paris might face a slightly colder November, while December and January could be pretty average. After that, all the forecast values are rather colder than the climatology.
Berlin might get a pretty average November. However, December could then get much colder. January and February could be quite average. After that, temperatures might be colder than usual.
Temperatures in Lisbon are forecast to be a bit colder in winter and spring. However, May is forecast to be quite average, while the June forecast is much warmer than the climatological mean.
End of explanation
"""
|
mayank-johri/LearnSeleniumUsingPython
|
Section 1 - Core Python/Chapter 02 - Basics/2.3. Maths Operators.ipynb
|
gpl-3.0
|
# Sample Code
# Say Cheese
x = 34 - 23
y = "!!! Say"
z = 3.45
print(id(x), id(y), id(z))
print(x, y, z)
x = x + 1
y = y + " Cheese !!!"
print("x = " + str(x))
print(y, id(y))
print("Is x > z", x > z ,"and y is", y, "and x =", x)
print("x - z =", x - z)
print("~^" * 30)
print(30 * "~_")
print(id(x), id(y), id(z))
print((30 * "~_") * 2)
print(30 * "~_" * 2)
print(30 * "~_" * "#")
print(30 * "~_" + "#")
t = x > z
print("x = " + str(x) + " and z = " + str(z) + " : " + str(t))
print("x =", x, "and z =", z, ":", t)
print(x, z)
print("x % z =", x % z )
print("x >= z", x <= z)
mass_kg = int(input("What is your mass in kilograms?" ))
mass_stone = mass_kg * 1.1 / 7
print("You weigh", mass_stone, "stone.")
"""
Explanation: Maths Operators
Python supports most common maths operations. The table below lists maths operators supported.
| Syntax | Math | Operation Name |
|-------------- |------------------------------------------- |------------------------------------------------------------------ |
| a+b | a+b | addition |
| a-b | a-b | subtraction |
| a*b | a * b | multiplication |
| a/b | a\div b | division (see note below) |
| a//b | a//b | floor division (e.g. 5//2=2) |
| a%b | a%b | modulo |
| -a | -a | negation |
| < | a < b | less- than |
| > | a > b | greater- than |
| <= | a <= b | less- than- equal |
| >= | a >= b | greater- than- equal |
| abs(a)| <code>| a |</code> | absolute value |
| a**b | a**b | exponent |
| math.sqrt(a) | sqrt a | square root |
Note:
In order to use the math.sqrt() function, you must explicitly load the math module by adding import math at the top of your file, where all the other module imports are defined.
End of explanation
"""
print (round(3.14159265, 2))
"""
Explanation: Order of Operations
Python uses the standard order of operations as taught in Algebra and Geometry classes. That is, mathematical expressions are evaluated in the following order (memorized by many as PEMDAS or BODMAS {Brackets, Orders or pOwers, Division, Multiplication, Addition, Subtraction}).
(Note that operations which share a table row are performed from left to right. That is, a division to the left of a multiplication, with no parentheses between them, is performed before the multiplication simply because it is to the left.)
| Name | Syntax | Description | PEMDAS Mnemonic |
|---------------------------- |---------- |---------------------------------------------------------------------------------------------------------------------------------------- |----------------- |
| Parentheses | ( ... ) | Before operating on anything else, Python must evaluate all parentheticals starting at the innermost level. (This includes functions.) | Please |
| Exponents | ** | As an exponent is simply short multiplication or division, it should be evaluated before them. | Excuse |
| Multiplication and Division | * / // % | Again, multiplication is rapid addition and must, therefore, happen first. | My Dear |
| Addition and Subtraction | + - | Addition and subtraction are evaluated last, from left to right. | Aunt Sally |
Formatting output
round()
End of explanation
"""
|
seniosh/StatisticalMethods
|
examples/StraightLine/ModelEvaluation.ipynb
|
gpl-2.0
|
%load_ext autoreload
%autoreload 2
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (6.0, 6.0)
plt.rcParams['savefig.dpi'] = 100
from straightline_utils import *
"""
Explanation: Testing the Straight Line Model
End of explanation
"""
(x,y,sigmay) = generate_data()
plot_yerr(x, y, sigmay)
"""
Explanation: The Data
Let's generate a simple Cepheids-like dataset: observations of $y$ with reported uncertainties $\sigma_y$, at given $x$ values.
Yours will look different to mine, as your random seed will be different. That's good - we can do some (completely unrealistic) comparisons over datasets.
End of explanation
"""
def straight_line_log_likelihood(theta, x, y, sigmay):
'''
Returns the log-likelihood of drawing data values *y* at
known values *x* given Gaussian measurement noise with standard
deviation with known *sigmay*, where the "true" y values are
*y_t = m * x + b*
x: list of x coordinates
y: list of y coordinates
sigmay: list of y uncertainties
m: scalar slope
b: scalar line intercept
Returns: scalar log likelihood
'''
m,b = theta
return (np.sum(np.log(1./(np.sqrt(2.*np.pi) * sigmay))) +
np.sum(-0.5 * (y - (m*x + b))**2 / sigmay**2))
def straight_line_log_prior(theta, theta_limits):
m, b = theta
mlimits, blimits = theta_limits
# Uniform in m:
if (m < mlimits[0]) | (m > mlimits[1]):
log_m_prior = -np.inf
else:
log_m_prior = np.log(1.0/(mlimits[1] - mlimits[0]))
# Uniform in b:
if (b < blimits[0]) | (b > blimits[1]):
log_b_prior = -np.inf
else:
log_b_prior = np.log(1.0/(blimits[1] - blimits[0]))
return log_m_prior + log_b_prior
def straight_line_log_posterior(theta, x, y, sigmay, theta_limits):
return (straight_line_log_likelihood(theta, x, y, sigmay) +
straight_line_log_prior(theta, theta_limits))
# Evaluate log P(m,b | x,y,sigmay) on a grid.
# Define uniform prior limits, enforcing positivity in both parameters:
mlimits = [0.0, 2.0]
blimits = [0.0, 200.0]
theta_limits = (mlimits, blimits)
# Set up grid:
mgrid = np.linspace(mlimits[0], mlimits[1], 101)
bgrid = np.linspace(blimits[0], blimits[1], 101)
log_posterior = np.zeros((len(mgrid),len(bgrid)))
# Evaluate log posterior PDF:
for im,m in enumerate(mgrid):
for ib,b in enumerate(bgrid):
theta = (m,b)
log_posterior[im,ib] = straight_line_log_posterior(theta, x, y, sigmay, theta_limits)
# Convert to probability density and plot, taking care with very small values:
posterior = np.exp(log_posterior - log_posterior.max())
plt.imshow(posterior, extent=[blimits[0],blimits[1],mlimits[0],mlimits[1]],cmap='Blues',
interpolation='none', origin='lower', aspect=(blimits[1]-blimits[0])/(mlimits[1]-mlimits[0]),
vmin=0, vmax=1)
plt.contour(bgrid, mgrid, posterior, pdf_contour_levels(posterior), colors='k')
i = np.argmax(posterior)
i,j = np.unravel_index(i, posterior.shape)
print('Grid maximum posterior values (m,b) =', mgrid[i], bgrid[j])
plt.title('Straight line: posterior PDF for parameters');
plot_mb_setup(*theta_limits);
"""
Explanation: Characterizing the posterior PDF
Like we did in Session 2, we can evaluate the posterior PDF for the straight line model slope $m$ and intercept $b$ on a grid.
Let's also take some samples, with a simple Metropolis routine.
End of explanation
"""
def metropolis(log_posterior, theta, theta_limits, stepsize, nsteps=10000):
'''
log_posterior: function of theta
theta_limits: uniform prior ranges
stepsize: scalar or vector proposal distribution width
nsteps: desired number of samples
'''
log_prob = log_posterior(theta, x, y, sigmay, theta_limits)
# Store Markov chain as an array of samples:
chain = np.empty((nsteps, len(theta)))
log_probs = np.empty(nsteps)
# Count our accepted proposals:
naccept = 0
for i in range(nsteps):
theta_new = theta + stepsize * np.random.randn(len(theta))
log_prob_new = log_posterior(theta_new, x, y, sigmay, theta_limits)
if np.log(np.random.rand()) < (log_prob_new - log_prob):
# accept, and move to the proposed position:
theta = theta_new
log_prob = log_prob_new
naccept += 1
else:
# reject, and store the same sample as before:
pass
chain[i] = theta
log_probs[i] = log_prob
acceptance_rate = naccept/float(nsteps)
return chain,log_probs,acceptance_rate
# Initialize m, b at the center of prior:
m = 0.5*(mlimits[0]+mlimits[1])
b = 0.5*(blimits[0]+blimits[1])
theta = np.array([m,b])
# Step sizes, 2% or 5% of the prior
mstep = 0.02*(mlimits[1]-mlimits[0])
bstep = 0.05*(blimits[1]-blimits[0])
stepsize = np.array([mstep,bstep])
# How many steps?
nsteps = 10000
print('Running Metropolis Sampler for', nsteps, 'steps...')
chain, log_probs, acceptance_rate = metropolis(
straight_line_log_posterior, theta, theta_limits, stepsize, nsteps=nsteps
)
print('Acceptance fraction:', acceptance_rate)
# Pull m and b arrays out of the Markov chain and plot them:
mm = [m for m,b in chain]
bb = [b for m,b in chain]
# Traces, for convergence inspection:
plt.figure(figsize=(8,5))
plt.subplot(2,1,1)
plt.plot(mm, 'k-')
plt.ylim(mlimits)
plt.ylabel('m')
plt.subplot(2,1,2)
plt.plot(bb, 'k-')
plt.ylabel('Intercept b')
plt.ylim(blimits)
"""
Explanation: And now to draw some samples:
End of explanation
"""
!pip install --upgrade --no-deps corner
import corner
corner.corner(chain[5000:], labels=['m','b'], range=[mlimits,blimits],quantiles=[0.16,0.5,0.84],
show_titles=True, title_args={"fontsize": 12},
plot_datapoints=True, fill_contours=True, levels=[0.68, 0.95],
color='b', bins=80, smooth=1.0);
"""
Explanation: Looks reasonable: a short burn-in period, followed by reasonably well-mixed samples.
Now let's look at the samples in parameter space, overlaid on our gridded posterior.
End of explanation
"""
# Generate a straight line model for each parameter combination, and plot:
X = np.linspace(xlimits[0],xlimits[1],50)
for i in (np.random.rand(100)*len(chain)).astype(int):
m,b = chain[i]
plt.plot(X, b+X*m, 'b-', alpha=0.1)
# Overlay the data, for comparison:
plot_yerr(x, y, sigmay);
"""
Explanation: It looks like we made a nice, precise measurement!
We made a lot of assumptions though - so we now need to test them. Our next step is to check the model for accuracy.
Model Checking
How do we know if our model is any good? One property that "good" models have is accuracy.
Accurate models generate data that is like the observed data. What does this mean? First we have to define what similarity is, in this context.
Visual impression is one very important way, that's usually best done first.
Test statistics that capture relevant features of the data are another. It's good to explore the posterior predictive distribution for these statistics, as a way of propagating our uncertainty in the model parameters into our predictions.
Visual Model Checking
Plot realizations of the model in data space.
End of explanation
"""
# Test statistics: functions of the data, not the parameters.
# 1) Weighted mean y:
# def test_statistic(x,y,sigmay):
# return np.sum(y/sigmay**2.0)/np.sum(1.0/sigmay**2.0)
# 2) Variance of y:
# def test_statistic(x,y,sigmay):
# return np.var(y)
# 3) Pearson r correlation coefficient:
import scipy.stats
def test_statistic(x,y,dummy):
'''
Pearson r correlation coefficient:
r12 = \sum [(xi - xbar)*(yi - ybar)] / [\sum (xi - xbar)^2 * \sum (yi - ybar)^2]^1/2
'''
r12 = np.sum((x - np.mean(x))*(y - np.mean(y))) / \
np.sqrt(np.sum((x - np.mean(x))**2) * np.sum((y - np.mean(y))**2))
return r12
# Approximate the posterior predictive distribution for T,
# by drawing a replica dataset for each sample (m,b) and computing its T:
T = np.zeros(len(chain))
for k,(m,b) in enumerate(chain):
yrep = b + m*x + np.random.randn(len(x)) * sigmay
T[k] = test_statistic(x,yrep,sigmay)
# Compare with the test statistic of the data, on a plot:
Td = test_statistic(x, y, sigmay)
plt.hist(T, 100, histtype='step', color='blue', lw=2, range=(0.0,np.percentile(T,99.0)))
plt.axvline(Td, color='black', linestyle='--', lw=2)
plt.xlabel('Test statistic')
plt.ylabel('Posterior predictive distribution')
# What is Pr(T>T(d)|d)?
greater = (T > Td)
P = 100*len(T[greater])/(1.0*len(T))
print("Pr(T>T(d)|d) = ",P,"%")
"""
Explanation: Posterior Predictive Model Checking
If our model is the true one, then replica data generated by it should look like the one dataset we have. This means that summaries of both the real dataset, $T(d)$, and the replica datasets, $T(d^{\rm rep})$, should be drawn from the same distribution.
If the real dataset was not generated with our model, then its summary may be an outlier from the distribution of summaries of replica datasets.
We can account for our uncertainty in the parameters $\theta$ by marginalizing them out, which can be easily done by just making the histogram of $T(d^{\rm rep}(\theta))$ from our posterior samples, after drawing one replica dataset $d^{\rm rep}$ from the model sampling distribution ${\rm Pr}(d^{\rm rep}\,|\,\theta)$ for each one.
Then, we can ask: what is the posterior probability for the summary $T$ to be greater than the observed summary $T(d)$? If this is very small or very large, we should be suspicious of our model - because it is not predicting the data very accurately.
${\rm Pr}(T(d^{\rm rep})>T(d)\,|\,d) = \int I(T(d^{\rm rep})>T(d))\,{\rm Pr}(d^{\rm rep}\,|\,\theta)\,{\rm Pr}(\theta\,|\,d)\;d\theta\,dd^{\rm rep}$
Here $I$ is the "indicator function" - 1 or 0 according to the condition.
End of explanation
"""
# Discrepancy: functions of the data AND parameters.
# 1) Reduced chisq for the model:
def discrepancy(x,y,sigmay,b,m):
return np.sum((y - m*x - b)**2.0/sigmay**2.0)/(len(y)-2)
# Approximate the posterior predictive distribution for T,
# by drawing a replica dataset for each sample (m,b) and computing its T,
# AND ALSO its Td (which now depends on the parameters, too):
T = np.zeros(len(chain))
Td = np.zeros(len(chain))
for k,(m,b) in enumerate(chain):
yrep = b + m*x + np.random.randn(len(x)) * sigmay
T[k] = discrepancy(x,yrep,sigmay,b,m)
Td[k] = discrepancy(x,y,sigmay,b,m)
# Compare T with Td, on a scatter plot - how often is T>Td?
plt.scatter(Td, T, color='blue',alpha=0.1)
plt.plot([0.0, 100.0], [0.0, 100.], color='k', linestyle='--', linewidth=2)
plt.xlabel('Observed discrepancy $T(d,\\theta)$')
plt.ylabel('Replicated discrepancy $T(d^{\\rm rep},\\theta)$')
plt.ylim([0.0,np.percentile(Td,99.0)])
plt.xlim([0.0,np.percentile(Td,99.0)])
# Histogram of differences:
diff = T-Td
plt.hist(diff, 100, histtype='step', color='blue', lw=2, range=(np.percentile(diff,1.0),np.percentile(diff,99.0)))
plt.axvline(0.0, color='black', linestyle='--', lw=2)
plt.xlabel('Difference $T(d^{\\rm rep},\\theta) - T(d,\\theta)$')
plt.ylabel('Posterior predictive distribution')
# What is Pr(T>T(d)|d)?
greater = (T > Td)
Pline = 100*len(T[greater])/(1.0*len(T))
print("Pr(T>T(d)|d) = ",Pline,"%")
"""
Explanation: If our model is true (and we're just uncertain about its parameters, given the data), we can compute the probability of getting a $T$ less than that observed.
Note that we did not have to look up any particular standard distribution - we can simply compute the posterior predictive distribution given our generative model.
This test statistic lacks power: better choices might put more acute stress on the model to perform, by focusing on the places where the model predictions are suspect.
Test statistics $T(d,\theta)$ that are functions of both the data and the parameters are known as discrepancy measures.
Similar in spirit to the above, we can compute the posterior probability of getting $T(d^{\rm rep},\theta) > T(d,\theta)$:
${\rm Pr}(T(d^{\rm rep},\theta)>T(d,\theta)\,|\,d) = \int I(T(d^{\rm rep},\theta)>T(d,\theta))\,{\rm Pr}(d^{\rm rep}\,|\,\theta)\,{\rm Pr}(\theta\,|\,d)\;d\theta\,dd^{\rm rep}$
End of explanation
"""
def quadratic_log_likelihood(theta, x, y, sigmay):
'''
Returns the log-likelihood of drawing data values y at
known values x given Gaussian measurement noise with standard
deviation with known sigmay, where the "true" y values are
y_t = m*x + b + q**2
x: list of x coordinates
y: list of y coordinates
sigmay: list of y uncertainties
m: scalar slope
b: scalar line intercept
q: quadratic term coefficient
where theta = (m, b, q)
Returns: scalar log likelihood
'''
m, b, q = theta
return (np.sum(np.log(1./(np.sqrt(2.*np.pi) * sigmay))) +
np.sum(-0.5 * (y - (m*x + b + q*x**2))**2 / sigmay**2))
def quadratic_log_prior(theta, theta_limits):
m, b, q = theta
mlimits, blimits, qpars = theta_limits
# m and b:
log_mb_prior = straight_line_log_prior(np.array([m,b]), np.array([mlimits, blimits]))
# q:
log_q_prior = np.log(1./(np.sqrt(2.*np.pi) * qpars[1])) - \
0.5 * (q - qpars[0])**2 / qpars[1]**2
return log_mb_prior + log_q_prior
def quadratic_log_posterior(theta, x, y, sigmay, theta_limits):
return (quadratic_log_likelihood(theta, x, y, sigmay) +
quadratic_log_prior(theta, theta_limits))
# Define uniform prior limits, enforcing positivity in m and b:
mlimits = [0.0, 2.0]
blimits = [0.0, 200.0]
# Define Gaussian prior centered on zero for q:
qpars = [0.0,0.003]
# Initialize m, b at the center of prior:
m = 0.5*(mlimits[0]+mlimits[1])
b = 0.5*(blimits[0]+blimits[1])
q = qpars[0]
# Arrays to pass to the sampler:
qtheta = np.array([m,b,q])
qtheta_limits = (mlimits, blimits, qpars)
# Step sizes, small fractions of the prior width:
mstep = 0.01*(mlimits[1]-mlimits[0])
bstep = 0.04*(blimits[1]-blimits[0])
qstep = 0.02*qpars[1]
stepsize = np.array([mstep,bstep,qstep])
# How many steps?
nsteps = 10000
print('Running Metropolis Sampler for', nsteps, 'steps...')
qchain, log_probs, acceptance_rate = metropolis(
quadratic_log_posterior, qtheta, qtheta_limits, stepsize, nsteps=nsteps
)
print('Acceptance fraction:', acceptance_rate)
# Pull m, b and q arrays out of the Markov chain and plot them:
mm = [m for m,b,q in qchain]
bb = [b for m,b,q in qchain]
qq = [q for m,b,q in qchain]
# Traces, for convergence inspection:
plt.figure(figsize=(8,5))
plt.subplot(3,1,1)
plt.plot(mm, 'k-')
plt.ylim(mlimits)
plt.ylabel('Slope m')
plt.subplot(3,1,2)
plt.plot(bb, 'k-')
plt.ylim(blimits)
plt.ylabel('Intercept b')
plt.subplot(3,1,3)
plt.plot(qq, 'k-')
plt.ylim([qpars[0]-3*qpars[1],qpars[0]+3*qpars[1]])
plt.ylabel('Quadratic coefficient q')
corner.corner(qchain, labels=['m','b','q'], range=[mlimits,blimits,(qpars[0]-3*qpars[1],qpars[0]+3*qpars[1])],quantiles=[0.16,0.5,0.84],
show_titles=True, title_args={"fontsize": 12},
plot_datapoints=True, fill_contours=True, levels=[0.68, 0.95],
color='green', bins=80, smooth=1.0);
"""
Explanation: The conclusion drawn from the discrepancy is more interesting, in this case. All our $\theta = (m,b)$ samples are plausible, so replica datasets generated by them should also be plausible. The straight line defined by each $(m,b)$ should go through the real data points as readily (on average) as it does its replica dataset.
Do our posterior predictive $p-$values suggest we need to improve our model? What about the visual check?
Exercise:
In some sense, the reduced chi-squared is actually not such an interesting test statistic, because it's very similar to the log likelihood!
Still more powerful discrepancy measures might stress-test different aspects of the model. Talk to your neighbor about where and how this model might be failing, and see if you can design a better discrepancy measure than reduced chi-squared.
Model Expansion
Maybe I see some curvature in the data - or maybe I have a new astrophysical idea for how this data was generated. Let's try adding an extra parameter to the model, to make a quadratic function:
$y = m x + b + q x^2$
The coefficient $q$ is probably pretty small (we were originally expecting to only have to use a straight line model for these data!), so I guess we can assign a fairly narrow prior, centered on zero.
End of explanation
"""
# Posterior visual check, in data space:
X = np.linspace(xlimits[0],xlimits[1],100)
for i in (np.random.rand(100)*len(chain)).astype(int):
m,b,q = qchain[i]
plt.plot(X, b + X*m + q*X**2, 'g-', alpha=0.1)
plot_yerr(x, y, sigmay)
# Discrepancy: functions of the data AND parameters.
# 1) Reduced chisq for the model:
def discrepancy(x,y,sigmay,m,b,q):
return np.sum((y - m*x - b - q*x**2)**2.0/sigmay**2.0)/(len(y)-3)
# Approximate the posterior predictive distribution for T,
# by drawing a replica dataset for each sample (m,b) and computing its T,
# AND ALSO its Td:
T = np.zeros(len(qchain))
Td = np.zeros(len(qchain))
for k,(m,b,q) in enumerate(qchain):
yp = b + m*x + q*x**2 + sigmay*np.random.randn(len(x))
T[k] = discrepancy(x,yp,sigmay,m,b,q)
Td[k] = discrepancy(x,y,sigmay,m,b,q)
# Histogram of differences:
diff = T - Td
plt.hist(diff, 100, histtype='step', color='green', lw=2, range=(np.percentile(diff,1.0),np.percentile(diff,99.0)))
plt.axvline(0.0, color='black', linestyle='--', lw=2)
plt.xlabel('Difference $T(d^{\\rm rep},\\theta) - T(d,\\theta)$')
plt.ylabel('Posterior predictive distribution')
# What is Pr(T>T(d)|d)?
greater = (T > Td)
Pquad = 100*len(T[greater])/(1.0*len(T))
print("Pr(T>T(d)|d,quadratic) = ",Pquad,"%, cf. Pr(T>T(d)|d,straightline) = ",Pline,"%")
"""
Explanation: All parameters are again precisely constrained.
The gradient and intercept $m$ and $b$ are significantly different from before, though...
Checking the Quadratic Model
End of explanation
"""
# Draw a large number of prior samples and calculate the log likelihood for each one:
N = 50000
# Set the priors:
mlimits = [0.0, 2.0]
blimits = [0.0, 200.0]
qpars = [0.0,0.003]
# Sample from the prior:
mm = np.random.uniform(mlimits[0],mlimits[1], size=N)
bb = np.random.uniform(blimits[0],blimits[1], size=N)
qq = qpars[0] + qpars[1]*np.random.randn(N)
# We'll store the posterior samples as a "chain" again
schain = []
log_likelihood_straight_line = np.zeros(N)
log_likelihood_quadratic = np.zeros(N)
for i in range(N):
theta = np.array([mm[i], bb[i]])
log_likelihood_straight_line[i] = straight_line_log_likelihood(theta, x, y, sigmay)
qtheta = np.array([mm[i], bb[i], qq[i]])
log_likelihood_quadratic[i] = quadratic_log_likelihood(qtheta, x, y, sigmay)
schain.append((mm[i],bb[i],qq[i]))
"""
Explanation: How do the two models compare? Which one matches the data better?
Model Comparison with the Evidence
In the posterior predictive check above we found it useful to consider the probability distribution of hypothetical replica datasets.
As well as this, we also have access to the quantity ${\rm Pr}(d\,|\,H)$, the evidence for the model $H$. Let's take a look at its properties.
Calculating the Evidence
The FML (fully marginalized likelihood - another name for the evidence) is in general quite difficult to calculate, since it involves averaging the likelihood over the prior. MCMC gives us samples from the posterior - and these cannot, it turns out, be reprocessed so as to estimate the evidence stably.
A number of sampling algorithms have been developed that do calculate the evidence, during the process of sampling. These include:
Nested Sampling (including MultiNest and DNest)
Parallel Tempering, Thermodynamic Integration
...
If we draw samples from the prior, we can then estimate the evidence via the usual sum over samples,
${\rm Pr}(d\,|\,H) \approx \frac{1}{N_s} \sum_s\;{\rm Pr}(d\,|\,\theta_s,H)$
Simple Monte Carlo:
Consider the simplest possible posterior inference, the mean of parameter $\theta$:
$\bar{\theta} = \int\,\theta\,{\rm Pr}(\theta\,|\,d,H)\,d\theta$
This is approximated by an average over posterior samples:
$\bar{\theta} \approx \frac{1}{N_s}\,\sum_s\,\theta_s$
Now replace the posterior PDF with Bayes' Theorem:
$\bar{\theta} = \frac{1}{Z} \int\,\theta\,{\rm Pr}(d\,|\,\theta,H)\,{\rm Pr}(\theta\,|\,H)\,d\theta$
and the posterior samples with prior samples:
$\bar{\theta} \approx \frac{1}{(Z N_s)}\,\sum_s\,\theta_s\,{\rm Pr}(d\,|\,\theta_s,H)$
This is a weighted mean - if we don't want $Z$, we just define weights $w_s = {\rm Pr}(d\,|\,\theta_s,H)$ and compute the approximate posterior mean as the likelihood-weighted prior mean:
$\bar{\theta} \approx \frac{\sum_s\,w_s\,\theta_s}{\sum_s\,w_s}$
To compute Z, we multiply both sides by $Z$ and replace $\theta$ with 1:
$Z \approx \frac{1}{N_s} \sum_s\,\,w_s$
Simple Monte Carlo works well in certain low-dimensional situations, but in general it is very inefficient (at best).
Still, let's give it a try on our two models, and attempt to compute the evidence ratio
$R = \frac{{\rm Pr}(d\,|\,{\rm quadratic})}{{\rm Pr}(d\,|\,{\rm straight line})}$
End of explanation
"""
# Unnormalized likelihoods for plotting:
unnormalized_likelihood_straight_line = np.exp(log_likelihood_straight_line - log_likelihood_straight_line.max())
unnormalized_likelihood_quadratic = np.exp(log_likelihood_quadratic - log_likelihood_quadratic.max())
corner.corner(schain, labels=['m','b','q'], range=[mlimits,blimits,(qpars[0]-3*qpars[1],qpars[0]+3*qpars[1])],quantiles=[0.16,0.5,0.84],
weights=unnormalized_likelihood_straight_line,
show_titles=True, title_args={"fontsize": 12},
plot_datapoints=False, fill_contours=True, levels=[0.68, 0.95],
color='blue', bins=80, smooth=1.0);
"""
Explanation: Now that we have the log likelihood for each sample, let's check that we did actually sample the posterior well. Here are the corner plots (note that for plotting, the weights don't need to be correctly normalized - and also that we do not want to plot the points as well as the contours, since the points are prior samples not posterior ones!):
End of explanation
"""
corner.corner(schain, labels=['m','b','q'], range=[mlimits,blimits,(qpars[0]-3*qpars[1],qpars[0]+3*qpars[1])],quantiles=[0.16,0.5,0.84],
weights=unnormalized_likelihood_quadratic,
show_titles=True, title_args={"fontsize": 12},
plot_datapoints=False, fill_contours=True, levels=[0.68, 0.95],
color='green', bins=80, smooth=1.0);
"""
Explanation: Question:
Why does $q$ appear in this plot? Does it affect the evidence calculation?
End of explanation
"""
def logaverage(x):
mx = x.max()
return np.log(np.sum(np.exp(x - mx))) + mx - np.log(len(x))
log_evidence_straight_line = logaverage(log_likelihood_straight_line)
log_evidence_quadratic = logaverage(log_likelihood_quadratic)
print('log Evidence for Straight Line Model:', log_evidence_straight_line)
print('log Evidence for Quadratic Model:', log_evidence_quadratic)
print('Evidence ratio in favour of the Quadratic Model:', int(np.exp(log_evidence_quadratic - log_evidence_straight_line)), "to 1")
"""
Explanation: Now let's compute the evidence - we'll need a special function that stably calculates the average $x$ given an array of $\log x$...
End of explanation
"""
|
NICTA/revrand
|
demos/regression_demo.ipynb
|
apache-2.0
|
%matplotlib inline
import matplotlib.pyplot as pl
pl.style.use('ggplot')
import numpy as np
from scipy.stats import gamma
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, RBF
from revrand import StandardLinearModel, GeneralizedLinearModel, likelihoods, Parameter, Positive
from revrand.metrics import msll, smse
from revrand.utils.datasets import gen_gausprocess_se
from revrand import basis_functions as bs
from revrand.optimize import AdaDelta, Adam
"""
Explanation: Example of how to use revrand for regression
In this notebook we demonstrate revrand's standard linear model (SLM) and generalised linear model (GLM) fitting a random draw from a GP. We also compare the performance of these algorithms to a full GP.
End of explanation
"""
N = 150 # Number of training points
Ns = 250 # Number of test points
lenscale_true = 1.2
noise_true = 0.1
Xtrain, ytrain, Xtest, ftest = \
gen_gausprocess_se(N, Ns, lenscale=lenscale_true, noise=noise_true)
ytest = ftest + np.random.randn(Ns) * noise_true
"""
Explanation: Dataset settings and creation
End of explanation
"""
# Common settings
nbases = 20 # Number of unique random bases to use for approximating a kernel
lenscale = gamma(a=1, scale=1) # Initial value for the lengthscale
var = gamma(a=0.1, scale=2) # Initial value for target noise
reg = gamma(a=1, scale=1) # Initial weight prior
# GLM specific settings
maxiter = 10000
batch_size = 10
updater = Adam()
# Setup random basis functions
base = bs.RandomRBF(Xdim=1,
nbases=nbases,
lenscale=Parameter(lenscale, Positive()),
regularizer=Parameter(reg, Positive())
)
"""
Explanation: Algorithm Settings
End of explanation
"""
# SLM
slm = StandardLinearModel(base, var=Parameter(var, Positive()),)
slm.fit(Xtrain, ytrain)
# GLM
llhood = likelihoods.Gaussian(var=Parameter(var, Positive()))
glm = GeneralizedLinearModel(llhood,
base,
batch_size=batch_size,
maxiter=maxiter,
updater=updater
)
glm.fit(Xtrain, ytrain)
# GP
kern = WhiteKernel(noise_level=np.sqrt(var.mean())) + 1**2 * RBF(length_scale=lenscale.mean())
gp = GaussianProcessRegressor(kernel=kern)
gp.fit(Xtrain, ytrain)
"""
Explanation: Parameter learning
End of explanation
"""
# SLM
Ey_e, Vy_e = slm.predict_moments(Xtest)
Sy_e = np.sqrt(Vy_e)
# GLM
Ey_g, Vf_g = glm.predict_moments(Xtest)
Vy_g = Vf_g + glm.like_hypers_
Sy_g = np.sqrt(Vy_g)
# GP
Ey_gp, Sy_gp = gp.predict(Xtest, return_std=True)
Vy_gp = Sy_gp**2
"""
Explanation: Model Querying
End of explanation
"""
LL_s = msll(ytest, Ey_e, Vy_e, ytrain)
LL_gp = msll(ytest, Ey_gp, Vy_gp, ytrain)
LL_g = msll(ytest, Ey_g, Vy_g, ytrain)
smse_s = smse(ytest, Ey_e)
smse_gp = smse(ytest, Ey_gp)
smse_glm = smse(ytest, Ey_g)
print("SLM: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_s, smse_s, np.sqrt(slm.var_), slm.hypers_))
print("GLM: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_g, smse_glm, np.sqrt(glm.like_hypers_),
glm.basis_hypers_))
print("GP: msll = {}, smse = {}, noise: {}, hypers: {}"
.format(LL_gp, smse_gp, gp.kernel_.k1.noise_level,
gp.kernel_.k2.k2.length_scale))
"""
Explanation: Score the models
End of explanation
"""
Xpl_t = Xtrain.flatten()
Xpl_s = Xtest.flatten()
# Training/Truth
pl.figure(figsize=(15, 10))
pl.plot(Xpl_t, ytrain, 'k.', label='Training')
pl.plot(Xpl_s, ftest, 'k-', label='Truth')
# ELBO Regressor
pl.plot(Xpl_s, Ey_e, 'g-', label='Bayesian linear regression')
pl.fill_between(Xpl_s, Ey_e - 2 * Sy_e, Ey_e + 2 * Sy_e, facecolor='none',
edgecolor='g', linestyle='--', label=None)
# GP
pl.plot(Xpl_s, Ey_gp, 'b-', label='GP')
pl.fill_between(Xpl_s, Ey_gp - 2 * Sy_gp, Ey_gp + 2 * Sy_gp,
facecolor='none', edgecolor='b', linestyle='--',
label=None)
# GLM Regressor
pl.plot(Xpl_s, Ey_g, 'm-', label='GLM')
pl.fill_between(Xpl_s, Ey_g - 2 * Sy_g, Ey_g + 2 * Sy_g, facecolor='none',
edgecolor='m', linestyle='--', label=None)
pl.legend()
pl.grid(True)
pl.ylabel('y')
pl.xlabel('x')
pl.show()
"""
Explanation: Plot predictions
End of explanation
"""
|
google/jax
|
docs/notebooks/Common_Gotchas_in_JAX.ipynb
|
apache-2.0
|
import numpy as np
from jax import grad, jit
from jax import lax
from jax import random
import jax
import jax.numpy as jnp
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import rcParams
rcParams['image.interpolation'] = 'nearest'
rcParams['image.cmap'] = 'viridis'
rcParams['axes.grid'] = False
"""
Explanation: 🔪 JAX - The Sharp Bits 🔪
levskaya@ mattjj@
When walking about the countryside of Italy, the people will not hesitate to tell you that JAX has "una anima di pura programmazione funzionale".
JAX is a language for expressing and composing transformations of numerical programs. JAX is also able to compile numerical programs for CPU or accelerators (GPU/TPU).
JAX works great for many numerical and scientific programs, but only if they are written with certain constraints that we describe below.
End of explanation
"""
def impure_print_side_effect(x):
print("Executing function") # This is a side-effect
return x
# The side-effects appear during the first run
print ("First call: ", jit(impure_print_side_effect)(4.))
# Subsequent runs with parameters of same type and shape may not show the side-effect
# This is because JAX now invokes a cached compilation of the function
print ("Second call: ", jit(impure_print_side_effect)(5.))
# JAX re-runs the Python function when the type or shape of the argument changes
print ("Third call, different type: ", jit(impure_print_side_effect)(jnp.array([5.])))
g = 0.
def impure_uses_globals(x):
return x + g
# JAX captures the value of the global during the first run
print ("First call: ", jit(impure_uses_globals)(4.))
g = 10. # Update the global
# Subsequent runs may silently use the cached value of the globals
print ("Second call: ", jit(impure_uses_globals)(5.))
# JAX re-runs the Python function when the type or shape of the argument changes
# This will end up reading the latest value of the global
print ("Third call, different type: ", jit(impure_uses_globals)(jnp.array([4.])))
g = 0.
def impure_saves_global(x):
global g
g = x
return x
# JAX runs the transformed function once with special Traced values for the arguments
print ("First call: ", jit(impure_saves_global)(4.))
print ("Saved global: ", g) # Saved global has an internal JAX value
"""
Explanation: 🔪 Pure functions
JAX transformation and compilation are designed to work only on Python functions that are functionally pure: all the input data is passed through the function parameters, all the results are output through the function results. A pure function will always return the same result if invoked with the same inputs.
Here are some examples of functions that are not functionally pure for which JAX behaves differently than the Python interpreter. Note that these behaviors are not guaranteed by the JAX system; the proper way to use JAX is to use it only on functionally pure Python functions.
End of explanation
"""
def pure_uses_internal_state(x):
state = dict(even=0, odd=0)
for i in range(10):
state['even' if i % 2 == 0 else 'odd'] += x
return state['even'] + state['odd']
print(jit(pure_uses_internal_state)(5.))
"""
Explanation: A Python function can be functionally pure even if it actually uses stateful objects internally, as long as it does not read or write external state:
End of explanation
"""
import jax.numpy as jnp
import jax.lax as lax
from jax import make_jaxpr
# lax.fori_loop
array = jnp.arange(10)
print(lax.fori_loop(0, 10, lambda i,x: x+array[i], 0)) # expected result 45
iterator = iter(range(10))
print(lax.fori_loop(0, 10, lambda i,x: x+next(iterator), 0)) # unexpected result 0
# lax.scan
def func11(arr, extra):
ones = jnp.ones(arr.shape)
def body(carry, aelems):
ae1, ae2 = aelems
return (carry + ae1 * ae2 + extra, carry)
return lax.scan(body, 0., (arr, ones))
make_jaxpr(func11)(jnp.arange(16), 5.)
# make_jaxpr(func11)(iter(range(16)), 5.) # throws error
# lax.cond
array_operand = jnp.array([0.])
lax.cond(True, lambda x: x+1, lambda x: x-1, array_operand)
iter_operand = iter(range(10))
# lax.cond(True, lambda x: next(x)+1, lambda x: next(x)-1, iter_operand) # throws error
"""
Explanation: It is not recommended to use iterators in any JAX function you want to jit or in any control-flow primitive. The reason is that an iterator is a Python object which introduces state in order to retrieve the next element. Therefore, it is incompatible with JAX's functional programming model. In the code below, there are some examples of incorrect attempts to use iterators with JAX. Most of them return an error, but some give unexpected results.
End of explanation
"""
numpy_array = np.zeros((3,3), dtype=np.float32)
print("original array:")
print(numpy_array)
# In place, mutating update
numpy_array[1, :] = 1.0
print("updated array:")
print(numpy_array)
"""
Explanation: 🔪 In-Place Updates
In Numpy you're used to doing this:
End of explanation
"""
jax_array = jnp.zeros((3,3), dtype=jnp.float32)
# In place update of JAX's array will yield an error!
try:
jax_array[1, :] = 1.0
except Exception as e:
print("Exception {}".format(e))
"""
Explanation: If we try to update a JAX device array in-place, however, we get an error! (☉_☉)
End of explanation
"""
updated_array = jax_array.at[1, :].set(1.0)
print("updated array:\n", updated_array)
"""
Explanation: Allowing mutation of variables in-place makes program analysis and transformation difficult. JAX requires that programs are pure functions.
Instead, JAX offers a functional array update using the .at property on JAX arrays.
️⚠️ inside jit'd code and lax.while_loop or lax.fori_loop the size of slices can't be functions of argument values but only functions of argument shapes -- the slice start indices have no such restriction. See the below Control Flow Section for more information on this limitation.
Array updates: x.at[idx].set(y)
For example, the update above can be written as:
End of explanation
"""
print("original array unchanged:\n", jax_array)
"""
Explanation: JAX's array update functions, unlike their NumPy versions, operate out-of-place. That is, the updated array is returned as a new array and the original array is not modified by the update.
End of explanation
"""
print("original array:")
jax_array = jnp.ones((5, 6))
print(jax_array)
new_jax_array = jax_array.at[::2, 3:].add(7.)
print("new array post-addition:")
print(new_jax_array)
"""
Explanation: However, inside jit-compiled code, if the input value x of x.at[idx].set(y) is not reused, the compiler will optimize the array update to occur in-place.
Array updates with other operations
Indexed array updates are not limited simply to overwriting values. For example, we can perform indexed addition as follows:
End of explanation
"""
try:
np.arange(10)[11]
except Exception as e:
print("Exception {}".format(e))
"""
Explanation: For more details on indexed array updates, see the documentation for the .at property.
🔪 Out-of-Bounds Indexing
In Numpy, you are used to errors being thrown when you index an array outside of its bounds, like this:
End of explanation
"""
jnp.arange(10)[11]
"""
Explanation: However, raising an error from code running on an accelerator can be difficult or impossible. Therefore, JAX must choose some non-error behavior for out of bounds indexing (akin to how invalid floating point arithmetic results in NaN). When the indexing operation is an array index update (e.g. index_add or scatter-like primitives), updates at out-of-bounds indices will be skipped; when the operation is an array index retrieval (e.g. NumPy indexing or gather-like primitives) the index is clamped to the bounds of the array since something must be returned. For example, the last value of the array will be returned from this indexing operation:
End of explanation
"""
np.sum([1, 2, 3])
"""
Explanation: Note that due to this behavior for index retrieval, functions like jnp.nanargmin and jnp.nanargmax return -1 for slices consisting of NaNs whereas Numpy would throw an error.
Note also that, as the two behaviors described above are not inverses of each other, reverse-mode automatic differentiation (which turns index updates into index retrievals and vice versa) will not preserve the semantics of out of bounds indexing. Thus it may be a good idea to think of out-of-bounds indexing in JAX as a case of undefined behavior.
🔪 Non-array inputs: NumPy vs. JAX
NumPy is generally happy accepting Python lists or tuples as inputs to its API functions:
End of explanation
"""
try:
jnp.sum([1, 2, 3])
except TypeError as e:
print(f"TypeError: {e}")
"""
Explanation: JAX departs from this, generally returning a helpful error:
End of explanation
"""
def permissive_sum(x):
return jnp.sum(jnp.array(x))
x = list(range(10))
permissive_sum(x)
"""
Explanation: This is a deliberate design choice, because passing lists or tuples to traced functions can lead to silent performance degradation that might otherwise be difficult to detect.
For example, consider the following permissive version of jnp.sum that allows list inputs:
End of explanation
"""
make_jaxpr(permissive_sum)(x)
"""
Explanation: The output is what we would expect, but this hides potential performance issues under the hood. In JAX's tracing and JIT compilation model, each element in a Python list or tuple is treated as a separate JAX variable, and individually processed and pushed to device. This can be seen in the jaxpr for the permissive_sum function above:
End of explanation
"""
jnp.sum(jnp.array(x))
"""
Explanation: Each entry of the list is handled as a separate input, resulting in a tracing & compilation overhead that grows linearly with the size of the list. To prevent surprises like this, JAX avoids implicit conversions of lists and tuples to arrays.
If you would like to pass a tuple or list to a JAX function, you can do so by first explicitly converting it to an array:
End of explanation
"""
print(np.random.random())
print(np.random.random())
print(np.random.random())
"""
Explanation: 🔪 Random Numbers
If all scientific papers whose results are in doubt because of bad
rand()s were to disappear from library shelves, there would be a
gap on each shelf about as big as your fist. - Numerical Recipes
RNGs and State
You're used to stateful pseudorandom number generators (PRNGs) from numpy and other libraries, which helpfully hide a lot of details under the hood to give you a ready fountain of pseudorandomness:
End of explanation
"""
np.random.seed(0)
rng_state = np.random.get_state()
#print(rng_state)
# --> ('MT19937', array([0, 1, 1812433255, 1900727105, 1208447044,
# 2481403966, 4042607538, 337614300, ... 614 more numbers...,
# 3048484911, 1796872496], dtype=uint32), 624, 0, 0.0)
"""
Explanation: Under the hood, numpy uses the Mersenne Twister PRNG to power its pseudorandom functions. The PRNG has a period of $2^{19937}-1$ and at any point can be described by 624 32-bit unsigned ints and a position indicating how much of this "entropy" has been used up.
End of explanation
"""
_ = np.random.uniform()
rng_state = np.random.get_state()
#print(rng_state)
# --> ('MT19937', array([2443250962, 1093594115, 1878467924,
# ..., 2648828502, 1678096082], dtype=uint32), 2, 0, 0.0)
# Let's exhaust the entropy in this PRNG statevector
for i in range(311):
_ = np.random.uniform()
rng_state = np.random.get_state()
#print(rng_state)
# --> ('MT19937', array([2443250962, 1093594115, 1878467924,
# ..., 2648828502, 1678096082], dtype=uint32), 624, 0, 0.0)
# Next call iterates the RNG state for a new batch of fake "entropy".
_ = np.random.uniform()
rng_state = np.random.get_state()
# print(rng_state)
# --> ('MT19937', array([1499117434, 2949980591, 2242547484,
# 4162027047, 3277342478], dtype=uint32), 2, 0, 0.0)
"""
Explanation: This pseudorandom state vector is automagically updated behind the scenes every time a random number is needed, "consuming" 2 of the uint32s in the Mersenne twister state vector:
End of explanation
"""
from jax import random
key = random.PRNGKey(0)
key
"""
Explanation: The problem with magic PRNG state is that it's hard to reason about how it's being used and updated across different threads, processes, and devices, and it's very easy to screw up when the details of entropy production and consumption are hidden from the end user.
The Mersenne Twister PRNG is also known to have a number of problems: it has a large 2.5 kB state size, which leads to problematic initialization issues; it fails modern BigCrush tests; and it is generally slow.
JAX PRNG
JAX instead implements an explicit PRNG where entropy production and consumption are handled by explicitly passing and iterating PRNG state. JAX uses a modern Threefry counter-based PRNG that's splittable. That is, its design allows us to fork the PRNG state into new PRNGs for use with parallel stochastic generation.
The random state is described by two unsigned-int32s that we call a key:
End of explanation
"""
print(random.normal(key, shape=(1,)))
print(key)
# No no no!
print(random.normal(key, shape=(1,)))
print(key)
"""
Explanation: JAX's random functions produce pseudorandom numbers from the PRNG state, but do not change the state!
Reusing the same state will cause sadness and monotony, depriving the end user of lifegiving chaos:
End of explanation
"""
print("old key", key)
key, subkey = random.split(key)
normal_pseudorandom = random.normal(subkey, shape=(1,))
print(" \---SPLIT --> new key ", key)
print(" \--> new subkey", subkey, "--> normal", normal_pseudorandom)
"""
Explanation: Instead, we split the PRNG to get usable subkeys every time we need a new pseudorandom number:
End of explanation
"""
print("old key", key)
key, subkey = random.split(key)
normal_pseudorandom = random.normal(subkey, shape=(1,))
print(" \---SPLIT --> new key ", key)
print(" \--> new subkey", subkey, "--> normal", normal_pseudorandom)
"""
Explanation: We propagate the key and make new subkeys whenever we need a new random number:
End of explanation
"""
key, *subkeys = random.split(key, 4)
for subkey in subkeys:
print(random.normal(subkey, shape=(1,)))
"""
Explanation: We can generate more than one subkey at a time:
End of explanation
"""
def f(x):
if x < 3:
return 3. * x ** 2
else:
return -4 * x
print(grad(f)(2.)) # ok!
print(grad(f)(4.)) # ok!
"""
Explanation: 🔪 Control Flow
✔ python control_flow + autodiff ✔
If you just want to apply grad to your python functions, you can use regular python control-flow constructs with no problems, as if you were using Autograd (or Pytorch or TF Eager).
End of explanation
"""
@jit
def f(x):
for i in range(3):
x = 2 * x
return x
print(f(3))
"""
Explanation: python control flow + JIT
Using control flow with jit is more complicated, and by default it has more constraints.
This works:
End of explanation
"""
@jit
def g(x):
y = 0.
for i in range(x.shape[0]):
y = y + x[i]
return y
print(g(jnp.array([1., 2., 3.])))
"""
Explanation: So does this:
End of explanation
"""
@jit
def f(x):
if x < 3:
return 3. * x ** 2
else:
return -4 * x
# This will fail!
try:
f(2)
except Exception as e:
print("Exception {}".format(e))
"""
Explanation: But this doesn't, at least by default:
End of explanation
"""
def f(x):
if x < 3:
return 3. * x ** 2
else:
return -4 * x
f = jit(f, static_argnums=(0,))
print(f(2.))
"""
Explanation: What gives!?
When we jit-compile a function, we usually want to compile a version of the function that works for many different argument values, so that we can cache and reuse the compiled code. That way we don't have to re-compile on each function evaluation.
For example, if we evaluate an @jit function on the array jnp.array([1., 2., 3.], jnp.float32), we might want to compile code that we can reuse to evaluate the function on jnp.array([4., 5., 6.], jnp.float32) to save on compile time.
To get a view of your Python code that is valid for many different argument values, JAX traces it on abstract values that represent sets of possible inputs. There are multiple different levels of abstraction, and different transformations use different abstraction levels.
By default, jit traces your code on the ShapedArray abstraction level, where each abstract value represents the set of all array values with a fixed shape and dtype. For example, if we trace using the abstract value ShapedArray((3,), jnp.float32), we get a view of the function that can be reused for any concrete value in the corresponding set of arrays. That means we can save on compile time.
But there's a tradeoff here: if we trace a Python function on a ShapedArray((), jnp.float32) that isn't committed to a specific concrete value, when we hit a line like if x < 3, the expression x < 3 evaluates to an abstract ShapedArray((), jnp.bool_) that represents the set {True, False}. When Python attempts to coerce that to a concrete True or False, we get an error: we don't know which branch to take, and can't continue tracing! The tradeoff is that with higher levels of abstraction we gain a more general view of the Python code (and thus save on re-compilations), but we require more constraints on the Python code to complete the trace.
The good news is that you can control this tradeoff yourself. By having jit trace on more refined abstract values, you can relax the traceability constraints. For example, using the static_argnums argument to jit, we can specify to trace on concrete values of some arguments. Here's that example function again:
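An equivalent decorator-style spelling (a small sketch, not in the original notebook) uses functools.partial:
python
from functools import partial
from jax import jit

@partial(jit, static_argnums=(0,))
def f(x):
    if x < 3:
        return 3. * x ** 2
    else:
        return -4 * x

f(2.)  # recompiles whenever it sees a new concrete value of x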
End of explanation
"""
def f(x, n):
y = 0.
for i in range(n):
y = y + x[i]
return y
f = jit(f, static_argnums=(1,))
f(jnp.array([2., 3., 4.]), 2)
"""
Explanation: Here's another example, this time involving a loop:
End of explanation
"""
def example_fun(length, val):
return jnp.ones((length,)) * val
# un-jit'd works fine
print(example_fun(5, 4))
bad_example_jit = jit(example_fun)
# this will fail:
try:
print(bad_example_jit(10, 4))
except Exception as e:
print("Exception {}".format(e))
# static_argnums tells JAX to recompile on changes at these argument positions:
good_example_jit = jit(example_fun, static_argnums=(0,))
# first compile
print(good_example_jit(10, 4))
# recompiles
print(good_example_jit(5, 4))
"""
Explanation: In effect, the loop gets statically unrolled. JAX can also trace at higher levels of abstraction, like Unshaped, but that's not currently the default for any transformation.
️⚠️ functions with argument-value dependent shapes
These control-flow issues also come up in a more subtle way: numerical functions we want to jit can't specialize the shapes of internal arrays on argument values (specializing on argument shapes is ok). As a trivial example, let's make a function whose output happens to depend on the input variable length.
End of explanation
"""
@jit
def f(x):
print(x)
y = 2 * x
print(y)
return y
f(2)
"""
Explanation: static_argnums can be handy if length in our example rarely changes, but it would be disastrous if it changed a lot!
Lastly, if your function has global side-effects, JAX's tracer can cause weird things to happen. A common gotcha is trying to print arrays inside jit'd functions:
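As a hedged aside (not part of the original notebook): more recent JAX releases provide jax.debug.print, which prints runtime values rather than tracers from inside jit'd code:
python
import jax
from jax import jit

@jit
def f(x):
    jax.debug.print("x = {}", x)  # prints the runtime value at execution time
    return 2 * x

f(2)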
End of explanation
"""
from jax import lax
operand = jnp.array([0.])
lax.cond(True, lambda x: x+1, lambda x: x-1, operand)
# --> array([1.], dtype=float32)
lax.cond(False, lambda x: x+1, lambda x: x-1, operand)
# --> array([-1.], dtype=float32)
"""
Explanation: Structured control flow primitives
There are more options for control flow in JAX. Say you want to avoid re-compilations but still want to use control flow that's traceable, and that avoids un-rolling large loops. Then you can use these 4 structured control flow primitives:
lax.cond differentiable
lax.while_loop fwd-mode-differentiable
lax.fori_loop fwd-mode-differentiable in general; fwd and rev-mode differentiable if endpoints are static.
lax.scan differentiable
cond
python equivalent:
python
def cond(pred, true_fun, false_fun, operand):
if pred:
return true_fun(operand)
else:
return false_fun(operand)
End of explanation
"""
init_val = 0
cond_fun = lambda x: x<10
body_fun = lambda x: x+1
lax.while_loop(cond_fun, body_fun, init_val)
# --> array(10, dtype=int32)
"""
Explanation: while_loop
python equivalent:
def while_loop(cond_fun, body_fun, init_val):
val = init_val
while cond_fun(val):
val = body_fun(val)
return val
End of explanation
"""
init_val = 0
start = 0
stop = 10
body_fun = lambda i,x: x+i
lax.fori_loop(start, stop, body_fun, init_val)
# --> array(45, dtype=int32)
"""
Explanation: fori_loop
python equivalent:
def fori_loop(start, stop, body_fun, init_val):
val = init_val
for i in range(start, stop):
val = body_fun(i, val)
return val
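lax.scan, the fourth primitive listed above, is not demonstrated in this notebook; here is a minimal illustrative sketch of its carry/output pattern:
python
from jax import lax
import jax.numpy as jnp

def cumulative_sum(xs):
    def step(carry, x):
        carry = carry + x
        return carry, carry  # (new carry, per-step output)
    total, partials = lax.scan(step, 0., xs)
    return total, partials

cumulative_sum(jnp.arange(4.))
# --> (array(6., dtype=float32), array([0., 1., 3., 6.], dtype=float32))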
End of explanation
"""
x = random.uniform(random.PRNGKey(0), (1000,), dtype=jnp.float64)
x.dtype
"""
Explanation: Summary
$$
\begin{array}{r|rr}
\hline
\textrm{construct} & \textrm{jit} & \textrm{grad} \\
\hline
\textrm{if} & ❌ & ✔ \\
\textrm{for} & ✔\ast & ✔ \\
\textrm{while} & ✔\ast & ✔ \\
\textrm{lax.cond} & ✔ & ✔ \\
\textrm{lax.while\_loop} & ✔ & \textrm{fwd} \\
\textrm{lax.fori\_loop} & ✔ & \textrm{fwd} \\
\textrm{lax.scan} & ✔ & ✔ \\
\hline
\end{array}
$$
<center>
$\ast$ = argument-<b>value</b>-independent loop condition - unrolls the loop
</center>
🔪 NaNs
Debugging NaNs
If you want to trace where NaNs are occurring in your functions or gradients, you can turn on the NaN-checker by:
setting the JAX_DEBUG_NANS=True environment variable;
adding from jax.config import config and config.update("jax_debug_nans", True) near the top of your main file;
adding from jax.config import config and config.parse_flags_with_absl() to your main file, then set the option using a command-line flag like --jax_debug_nans=True;
This will cause computations to error-out immediately on production of a NaN. Switching this option on adds a nan check to every floating point type value produced by XLA. That means values are pulled back to the host and checked as ndarrays for every primitive operation not under an @jit. For code under an @jit, the output of every @jit function is checked and if a nan is present it will re-run the function in de-optimized op-by-op mode, effectively removing one level of @jit at a time.
There could be tricky situations that arise, like nans that only occur under a @jit but don't get produced in de-optimized mode. In that case you'll see a warning message print out but your code will continue to execute.
If the nans are being produced in the backward pass of a gradient evaluation, when an exception is raised several frames up in the stack trace you will be in the backward_pass function, which is essentially a simple jaxpr interpreter that walks the sequence of primitive operations in reverse. In the example below, we started an ipython repl with the command line env JAX_DEBUG_NANS=True ipython, then ran this:
```
In [1]: import jax.numpy as jnp
In [2]: jnp.divide(0., 0.)
FloatingPointError Traceback (most recent call last)
<ipython-input-2-f2e2c413b437> in <module>()
----> 1 jnp.divide(0., 0.)
.../jax/jax/numpy/lax_numpy.pyc in divide(x1, x2)
343 return floor_divide(x1, x2)
344 else:
--> 345 return true_divide(x1, x2)
346
347
.../jax/jax/numpy/lax_numpy.pyc in true_divide(x1, x2)
332 x1, x2 = _promote_shapes(x1, x2)
333 return lax.div(lax.convert_element_type(x1, result_dtype),
--> 334 lax.convert_element_type(x2, result_dtype))
335
336
.../jax/jax/lax.pyc in div(x, y)
244 def div(x, y):
245 r"""Elementwise division: :math:x \over y."""
--> 246 return div_p.bind(x, y)
247
248 def rem(x, y):
... stack trace ...
.../jax/jax/interpreters/xla.pyc in handle_result(device_buffer)
103 py_val = device_buffer.to_py()
104 if np.any(np.isnan(py_val)):
--> 105 raise FloatingPointError("invalid value")
106 else:
107 return DeviceArray(device_buffer, *result_shape)
FloatingPointError: invalid value
```
The nan generated was caught. By running %debug, we can get a post-mortem debugger. This also works with functions under @jit, as the example below shows.
```
In [4]: from jax import jit
In [5]: @jit
...: def f(x, y):
...: a = x * y
...: b = (x + y) / (x - y)
...: c = a + 2
...: return a + b * c
...:
In [6]: x = jnp.array([2., 0.])
In [7]: y = jnp.array([3., 0.])
In [8]: f(x, y)
Invalid value encountered in the output of a jit function. Calling the de-optimized version.
FloatingPointError Traceback (most recent call last)
<ipython-input-8-811b7ddb3300> in <module>()
----> 1 f(x, y)
... stack trace ...
<ipython-input-5-619b39acbaac> in f(x, y)
2 def f(x, y):
3 a = x * y
----> 4 b = (x + y) / (x - y)
5 c = a + 2
6 return a + b * c
.../jax/jax/numpy/lax_numpy.pyc in divide(x1, x2)
343 return floor_divide(x1, x2)
344 else:
--> 345 return true_divide(x1, x2)
346
347
.../jax/jax/numpy/lax_numpy.pyc in true_divide(x1, x2)
332 x1, x2 = _promote_shapes(x1, x2)
333 return lax.div(lax.convert_element_type(x1, result_dtype),
--> 334 lax.convert_element_type(x2, result_dtype))
335
336
.../jax/jax/lax.pyc in div(x, y)
244 def div(x, y):
245 r"""Elementwise division: :math:x \over y."""
--> 246 return div_p.bind(x, y)
247
248 def rem(x, y):
... stack trace ...
```
When this code sees a nan in the output of an @jit function, it calls into the de-optimized code, so we still get a clear stack trace. And we can run a post-mortem debugger with %debug to inspect all the values to figure out the error.
⚠️ You shouldn't have the NaN-checker on if you're not debugging, as it can introduce lots of device-host round-trips and performance regressions!
⚠️ The NaN-checker doesn't work with pmap. To debug nans in pmap code, one thing to try is replacing pmap with vmap.
🔪 Double (64bit) precision
At the moment, JAX by default enforces single-precision numbers to mitigate the Numpy API's tendency to aggressively promote operands to double. This is the desired behavior for many machine-learning applications, but it may catch you by surprise!
End of explanation
"""
import jax.numpy as jnp
from jax import random
x = random.uniform(random.PRNGKey(0), (1000,), dtype=jnp.float64)
x.dtype # --> dtype('float64')
"""
Explanation: To use double-precision numbers, you need to set the jax_enable_x64 configuration variable at startup.
There are a few ways to do this:
You can enable 64bit mode by setting the environment variable JAX_ENABLE_X64=True.
You can manually set the jax_enable_x64 configuration flag at startup:
python
# again, this only works on startup!
from jax.config import config
config.update("jax_enable_x64", True)
You can parse command-line flags with absl.app.run(main)
python
from jax.config import config
config.config_with_absl()
If you want JAX to run absl parsing for you, i.e. you don't want to do absl.app.run(main), you can instead use
python
from jax.config import config
if __name__ == '__main__':
# calls config.config_with_absl() *and* runs absl parsing
config.parse_flags_with_absl()
Note that #2-#4 work for any of JAX's configuration options.
We can then confirm that x64 mode is enabled:
End of explanation
"""
|
atulsingh0/MachineLearning
|
HandsOnML/code/15_autoencoders.ipynb
|
gpl-3.0
|
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import numpy.random as rnd  # used below as rnd.seed(), rnd.rand(), rnd.permutation(), ...
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "autoencoders"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
"""
Explanation: Chapter 15 – Autoencoders
This notebook contains all the sample code and solutions to the exercises in chapter 15.
Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
End of explanation
"""
def plot_image(image, shape=[28, 28]):
plt.imshow(image.reshape(shape), cmap="Greys", interpolation="nearest")
plt.axis("off")
def plot_multiple_images(images, n_rows, n_cols, pad=2):
images = images - images.min() # make the minimum == 0, so the padding looks white
w,h = images.shape[1:]
image = np.zeros(((w+pad)*n_rows+pad, (h+pad)*n_cols+pad))
for y in range(n_rows):
for x in range(n_cols):
image[(y*(h+pad)+pad):(y*(h+pad)+pad+h),(x*(w+pad)+pad):(x*(w+pad)+pad+w)] = images[y*n_cols+x]
plt.imshow(image, cmap="Greys", interpolation="nearest")
plt.axis("off")
"""
Explanation: A couple of utility functions to plot grayscale 28x28 images:
End of explanation
"""
rnd.seed(4)
m = 200
w1, w2 = 0.1, 0.3
noise = 0.1
angles = rnd.rand(m) * 3 * np.pi / 2 - 0.5
data = np.empty((m, 3))
data[:, 0] = np.cos(angles) + np.sin(angles)/2 + noise * rnd.randn(m) / 2
data[:, 1] = np.sin(angles) * 0.7 + noise * rnd.randn(m) / 2
data[:, 2] = data[:, 0] * w1 + data[:, 1] * w2 + noise * rnd.randn(m)
"""
Explanation: PCA with a linear Autoencoder
Build 3D dataset:
End of explanation
"""
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(data[:100])
X_test = scaler.transform(data[100:])
"""
Explanation: Normalize the data:
End of explanation
"""
import tensorflow as tf
reset_graph()
n_inputs = 3
n_hidden = 2 # codings
n_outputs = n_inputs
learning_rate = 0.01
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden)
outputs = tf.layers.dense(hidden, n_outputs)
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
n_iterations = 1000
codings = hidden
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
training_op.run(feed_dict={X: X_train})
codings_val = codings.eval(feed_dict={X: X_test})
fig = plt.figure(figsize=(4,3))
plt.plot(codings_val[:,0], codings_val[:, 1], "b.")
plt.xlabel("$z_1$", fontsize=18)
plt.ylabel("$z_2$", fontsize=18, rotation=0)
save_fig("linear_autoencoder_pca_plot")
plt.show()
"""
Explanation: Now let's build the Autoencoder...
Note: instead of using the fully_connected() function from the tensorflow.contrib.layers module (as in the book), we now use the dense() function from the tf.layers module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while tf.layers is part of the official API. As you will see, the code is mostly the same.
The main differences relevant to this chapter are:
* the scope parameter was renamed to name, and the _fn suffix was removed in all the parameters that had it (for example the activation_fn parameter was renamed to activation).
* the weights parameter was renamed to kernel and the weights variable is now named "kernel" rather than "weights",
* the bias variable is now named "bias" rather than "biases",
* the default activation is None instead of tf.nn.relu
End of explanation
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
"""
Explanation: Stacked Autoencoders
Let's use MNIST:
End of explanation
"""
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
he_init = tf.contrib.layers.variance_scaling_initializer() # He initialization
#Equivalent to:
#he_init = lambda shape, dtype=tf.float32: tf.truncated_normal(shape, 0., stddev=np.sqrt(2/shape[0]))
l2_regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
my_dense_layer = partial(tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=he_init,
kernel_regularizer=l2_regularizer)
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3 = my_dense_layer(hidden2, n_hidden3)
outputs = my_dense_layer(hidden3, n_outputs, activation=None)
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver() # not shown in the book
"""
Explanation: Train all layers at once
Let's build a stacked Autoencoder with 3 hidden layers and 1 output layer (ie. 2 stacked Autoencoders). We will use ELU activation, He initialization and L2 regularization.
Note: since the tf.layers.dense() function is incompatible with tf.contrib.layers.arg_scope() (which is used in the book), we now use python's functools.partial() function instead. It makes it easy to create a my_dense_layer() function that just calls tf.layers.dense() with the desired parameters automatically set (unless they are overridden when calling my_dense_layer()).
End of explanation
"""
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="") # not shown in the book
sys.stdout.flush() # not shown
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch}) # not shown
print("\r{}".format(epoch), "Train MSE:", loss_train) # not shown
saver.save(sess, "./my_model_all_layers.ckpt") # not shown
"""
Explanation: Now let's train it! Note that we don't feed target values (y_batch is not used). This is unsupervised training.
End of explanation
"""
def show_reconstructed_digits(X, outputs, model_path = None, n_test_digits = 2):
with tf.Session() as sess:
if model_path:
saver.restore(sess, model_path)
X_test = mnist.test.images[:n_test_digits]
outputs_val = outputs.eval(feed_dict={X: X_test})
fig = plt.figure(figsize=(8, 3 * n_test_digits))
for digit_index in range(n_test_digits):
plt.subplot(n_test_digits, 2, digit_index * 2 + 1)
plot_image(X_test[digit_index])
plt.subplot(n_test_digits, 2, digit_index * 2 + 2)
plot_image(outputs_val[digit_index])
show_reconstructed_digits(X, outputs, "./my_model_all_layers.ckpt")
save_fig("reconstruction_plot")
"""
Explanation: This function loads the model, evaluates it on the test set (it measures the reconstruction error), then it displays the original image and its reconstruction:
End of explanation
"""
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.transpose(weights2, name="weights3") # tied weights
weights4 = tf.transpose(weights1, name="weights4") # tied weights
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_loss = regularizer(weights1) + regularizer(weights2)
loss = reconstruction_loss + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 5
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_tying_weights.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_tying_weights.ckpt")
"""
Explanation: Tying weights
It is common to tie the weights of the encoder and the decoder (weights_decoder = tf.transpose(weights_encoder)). Unfortunately this makes it impossible (or very tricky) to use the tf.layers.dense() function, so we need to build the Autoencoder manually:
End of explanation
"""
reset_graph()
from functools import partial
def train_autoencoder(X_train, n_neurons, n_epochs, batch_size,
learning_rate = 0.01, l2_reg = 0.0005,
activation=tf.nn.elu, seed=42):
graph = tf.Graph()
with graph.as_default():
tf.set_random_seed(seed)
n_inputs = X_train.shape[1]
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
my_dense_layer = partial(
tf.layers.dense,
activation=activation,
kernel_initializer=tf.contrib.layers.variance_scaling_initializer(),
kernel_regularizer=tf.contrib.layers.l2_regularizer(l2_reg))
hidden = my_dense_layer(X, n_neurons, name="hidden")
outputs = my_dense_layer(hidden, n_inputs, activation=None, name="outputs")
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session(graph=graph) as sess:
init.run()
for epoch in range(n_epochs):
n_batches = len(X_train) // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(len(X_train))[:batch_size]
X_batch = X_train[indices]
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
params = dict([(var.name, var.eval()) for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])
hidden_val = hidden.eval(feed_dict={X: X_train})
return hidden_val, params["hidden/kernel:0"], params["hidden/bias:0"], params["outputs/kernel:0"], params["outputs/bias:0"]
"""
Explanation: Training one Autoencoder at a time in multiple graphs
There are many ways to train one Autoencoder at a time. The first approach is to train each Autoencoder in a different graph, and then to create the Stacked Autoencoder by simply initializing it with the weights and biases copied from these Autoencoders.
Let's create a function that will train one autoencoder and return the transformed training set (i.e., the output of the hidden layer) and the model parameters.
End of explanation
"""
hidden_output, W1, b1, W4, b4 = train_autoencoder(mnist.train.images, n_neurons=300, n_epochs=4, batch_size=150)
_, W2, b2, W3, b3 = train_autoencoder(hidden_output, n_neurons=150, n_epochs=4, batch_size=150)
"""
Explanation: Now let's train two Autoencoders. The first one is trained on the training data, and the second is trained on the previous Autoencoder's hidden layer output:
End of explanation
"""
reset_graph()
n_inputs = 28*28
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden1 = tf.nn.elu(tf.matmul(X, W1) + b1)
hidden2 = tf.nn.elu(tf.matmul(hidden1, W2) + b2)
hidden3 = tf.nn.elu(tf.matmul(hidden2, W3) + b3)
outputs = tf.matmul(hidden3, W4) + b4
show_reconstructed_digits(X, outputs)
"""
Explanation: Finally, we can create a Stacked Autoencoder by simply reusing the weights and biases from the Autoencoders we just trained:
End of explanation
"""
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
l2_reg = 0.0001
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights3_init = initializer([n_hidden2, n_hidden3])
weights4_init = initializer([n_hidden3, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
weights4 = tf.Variable(weights4_init, dtype=tf.float32, name="weights4")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_hidden3), name="biases3")
biases4 = tf.Variable(tf.zeros(n_outputs), name="biases4")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
hidden3 = activation(tf.matmul(hidden2, weights3) + biases3)
outputs = tf.matmul(hidden3, weights4) + biases4
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
optimizer = tf.train.AdamOptimizer(learning_rate)
with tf.name_scope("phase1"):
phase1_outputs = tf.matmul(hidden1, weights4) + biases4 # bypass hidden2 and hidden3
phase1_reconstruction_loss = tf.reduce_mean(tf.square(phase1_outputs - X))
phase1_reg_loss = regularizer(weights1) + regularizer(weights4)
phase1_loss = phase1_reconstruction_loss + phase1_reg_loss
phase1_training_op = optimizer.minimize(phase1_loss)
with tf.name_scope("phase2"):
phase2_reconstruction_loss = tf.reduce_mean(tf.square(hidden3 - hidden1))
phase2_reg_loss = regularizer(weights2) + regularizer(weights3)
phase2_loss = phase2_reconstruction_loss + phase2_reg_loss
train_vars = [weights2, biases2, weights3, biases3]
phase2_training_op = optimizer.minimize(phase2_loss, var_list=train_vars) # freeze hidden1
init = tf.global_variables_initializer()
saver = tf.train.Saver()
training_ops = [phase1_training_op, phase2_training_op]
reconstruction_losses = [phase1_reconstruction_loss, phase2_reconstruction_loss]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
init.run()
for phase in range(2):
print("Training phase #{}".format(phase + 1))
for epoch in range(n_epochs[phase]):
n_batches = mnist.train.num_examples // batch_sizes[phase]
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
sess.run(training_ops[phase], feed_dict={X: X_batch})
loss_train = reconstruction_losses[phase].eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_one_at_a_time.ckpt")
loss_test = reconstruction_loss.eval(feed_dict={X: mnist.test.images})
print("Test MSE:", loss_test)
"""
Explanation: Training one Autoencoder at a time in a single graph
Another approach is to use a single graph. To do this, we create the graph for the full Stacked Autoencoder, but then we also add operations to train each Autoencoder independently: phase 1 trains the bottom and top layer (ie. the first Autoencoder) and phase 2 trains the two middle layers (ie. the second Autoencoder).
End of explanation
"""
training_ops = [phase1_training_op, phase2_training_op]
reconstruction_losses = [phase1_reconstruction_loss, phase2_reconstruction_loss]
n_epochs = [4, 4]
batch_sizes = [150, 150]
with tf.Session() as sess:
init.run()
for phase in range(2):
print("Training phase #{}".format(phase + 1))
if phase == 1:
hidden1_cache = hidden1.eval(feed_dict={X: mnist.train.images})
for epoch in range(n_epochs[phase]):
n_batches = mnist.train.num_examples // batch_sizes[phase]
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
if phase == 1:
indices = rnd.permutation(mnist.train.num_examples)
hidden1_batch = hidden1_cache[indices[:batch_sizes[phase]]]
feed_dict = {hidden1: hidden1_batch}
sess.run(training_ops[phase], feed_dict=feed_dict)
else:
X_batch, y_batch = mnist.train.next_batch(batch_sizes[phase])
feed_dict = {X: X_batch}
sess.run(training_ops[phase], feed_dict=feed_dict)
loss_train = reconstruction_losses[phase].eval(feed_dict=feed_dict)
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_cache_frozen.ckpt")
loss_test = reconstruction_loss.eval(feed_dict={X: mnist.test.images})
print("Test MSE:", loss_test)
"""
Explanation: Cache the frozen layer outputs
Since hidden1 is frozen during phase 2, its output for any given training instance never changes, so we can compute it once for the whole training set and feed the cached activations directly when training the upper layers, which speeds up phase 2.
End of explanation
"""
n_test_digits = 2
X_test = mnist.test.images[:n_test_digits]
with tf.Session() as sess:
saver.restore(sess, "./my_model_one_at_a_time.ckpt") # not shown in the book
outputs_val = outputs.eval(feed_dict={X: X_test})
def plot_image(image, shape=[28, 28]):
plt.imshow(image.reshape(shape), cmap="Greys", interpolation="nearest")
plt.axis("off")
for digit_index in range(n_test_digits):
plt.subplot(n_test_digits, 2, digit_index * 2 + 1)
plot_image(X_test[digit_index])
plt.subplot(n_test_digits, 2, digit_index * 2 + 2)
plot_image(outputs_val[digit_index])
"""
Explanation: Visualizing the Reconstructions
End of explanation
"""
with tf.Session() as sess:
saver.restore(sess, "./my_model_one_at_a_time.ckpt") # not shown in the book
weights1_val = weights1.eval()
for i in range(5):
plt.subplot(1, 5, i + 1)
plot_image(weights1_val.T[i])
save_fig("extracted_features_plot") # not shown
plt.show() # not shown
"""
Explanation: Visualizing the extracted features
End of explanation
"""
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150
n_outputs = 10
learning_rate = 0.01
l2_reg = 0.0005
activation = tf.nn.elu
regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.int32, shape=[None])
weights1_init = initializer([n_inputs, n_hidden1])
weights2_init = initializer([n_hidden1, n_hidden2])
weights3_init = initializer([n_hidden2, n_outputs])
weights1 = tf.Variable(weights1_init, dtype=tf.float32, name="weights1")
weights2 = tf.Variable(weights2_init, dtype=tf.float32, name="weights2")
weights3 = tf.Variable(weights3_init, dtype=tf.float32, name="weights3")
biases1 = tf.Variable(tf.zeros(n_hidden1), name="biases1")
biases2 = tf.Variable(tf.zeros(n_hidden2), name="biases2")
biases3 = tf.Variable(tf.zeros(n_outputs), name="biases3")
hidden1 = activation(tf.matmul(X, weights1) + biases1)
hidden2 = activation(tf.matmul(hidden1, weights2) + biases2)
logits = tf.matmul(hidden2, weights3) + biases3
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
reg_loss = regularizer(weights1) + regularizer(weights2) + regularizer(weights3)
loss = cross_entropy + reg_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
pretrain_saver = tf.train.Saver([weights1, weights2, biases1, biases2])
saver = tf.train.Saver()
"""
Explanation: Unsupervised pretraining
Let's create a small neural network for MNIST classification:
End of explanation
"""
n_epochs = 4
batch_size = 150
n_labeled_instances = 20000
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = n_labeled_instances // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(n_labeled_instances)[:batch_size]
X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end=" ")
saver.save(sess, "./my_model_supervised.ckpt")
accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
print("Test accuracy:", accuracy_val)
"""
Explanation: Regular training (without pretraining):
End of explanation
"""
n_epochs = 4
batch_size = 150
n_labeled_instances = 20000
#training_op = optimizer.minimize(loss, var_list=[weights3, biases3]) # Freeze layers 1 and 2 (optional)
with tf.Session() as sess:
init.run()
pretrain_saver.restore(sess, "./my_model_cache_frozen.ckpt")
for epoch in range(n_epochs):
n_batches = n_labeled_instances // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
indices = rnd.permutation(n_labeled_instances)[:batch_size]
X_batch, y_batch = mnist.train.images[indices], mnist.train.labels[indices]
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
accuracy_val = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
print("\r{}".format(epoch), "Train accuracy:", accuracy_val, end="\t")
saver.save(sess, "./my_model_supervised_pretrained.ckpt")
accuracy_val = accuracy.eval(feed_dict={X: mnist.test.images, y: mnist.test.labels})
print("Test accuracy:", accuracy_val)
"""
Explanation: Now reusing the first two layers of the autoencoder we pretrained:
End of explanation
"""
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
noise_level = 1.0
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
X_noisy = X + noise_level * tf.random_normal(tf.shape(X))
hidden1 = tf.layers.dense(X_noisy, n_hidden1, activation=tf.nn.relu,
name="hidden1")
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, # not shown in the book
name="hidden2") # not shown
hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, # not shown
name="hidden3") # not shown
outputs = tf.layers.dense(hidden3, n_outputs, name="outputs") # not shown
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_stacked_denoising_gaussian.ckpt")
"""
Explanation: Stacked denoising Autoencoder
Note: the book uses tf.contrib.layers.dropout() rather than tf.layers.dropout() (which did not exist when this chapter was written). It is now preferable to use tf.layers.dropout(), because anything in the contrib module may change or be deleted without notice. The tf.layers.dropout() function is almost identical to the tf.contrib.layers.dropout() function, except for a few minor differences. Most importantly:
* you must specify the dropout rate (rate) rather than the keep probability (keep_prob), where rate is simply equal to 1 - keep_prob,
* the is_training parameter is renamed to training.
Using Gaussian noise:
End of explanation
"""
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 150 # codings
n_hidden3 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.01
dropout_rate = 0.3
training = tf.placeholder_with_default(False, shape=(), name='training')
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
X_drop = tf.layers.dropout(X, dropout_rate, training=training)
hidden1 = tf.layers.dense(X_drop, n_hidden1, activation=tf.nn.relu,
name="hidden1")
hidden2 = tf.layers.dense(hidden1, n_hidden2, activation=tf.nn.relu, # not shown in the book
name="hidden2") # not shown
hidden3 = tf.layers.dense(hidden2, n_hidden3, activation=tf.nn.relu, # not shown
name="hidden3") # not shown
outputs = tf.layers.dense(hidden3, n_outputs, name="outputs") # not shown
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(reconstruction_loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 10
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, training: True})
loss_train = reconstruction_loss.eval(feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", loss_train)
saver.save(sess, "./my_model_stacked_denoising_dropout.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_stacked_denoising_dropout.ckpt")
"""
Explanation: Using dropout:
End of explanation
"""
p = 0.1
q = np.linspace(0.001, 0.999, 500)
kl_div = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))
mse = (p - q)**2
plt.plot([p, p], [0, 0.3], "k:")
plt.text(0.05, 0.32, "Target\nsparsity", fontsize=14)
plt.plot(q, kl_div, "b-", label="KL divergence")
plt.plot(q, mse, "r--", label="MSE")
plt.legend(loc="upper left")
plt.xlabel("Actual sparsity")
plt.ylabel("Cost", rotation=0)
plt.axis([0, 1, 0, 0.95])
save_fig("sparsity_loss_plot")
reset_graph()
n_inputs = 28 * 28
n_hidden1 = 1000 # sparse codings
n_outputs = n_inputs
def kl_divergence(p, q):
# Kullback Leibler divergence
return p * tf.log(p / q) + (1 - p) * tf.log((1 - p) / (1 - q))
learning_rate = 0.01
sparsity_target = 0.1
sparsity_weight = 0.2
X = tf.placeholder(tf.float32, shape=[None, n_inputs]) # not shown in the book
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.sigmoid) # not shown
outputs = tf.layers.dense(hidden1, n_outputs) # not shown
hidden1_mean = tf.reduce_mean(hidden1, axis=0) # batch mean
sparsity_loss = tf.reduce_sum(kl_divergence(sparsity_target, hidden1_mean))
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE
loss = reconstruction_loss + sparsity_weight * sparsity_loss
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 100
batch_size = 1000
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
reconstruction_loss_val, sparsity_loss_val, loss_val = sess.run([reconstruction_loss, sparsity_loss, loss], feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train MSE:", reconstruction_loss_val, "\tSparsity loss:", sparsity_loss_val, "\tTotal loss:", loss_val)
saver.save(sess, "./my_model_sparse.ckpt")
show_reconstructed_digits(X, outputs, "./my_model_sparse.ckpt")
"""
Explanation: Sparse Autoencoder
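The sparsity penalty implemented in the code above is the KL divergence between the target sparsity $p$ and the mean activation $q$ of each coding neuron over the batch:
$$
D_{KL}(p \,\|\, q) = p \log\frac{p}{q} + (1 - p) \log\frac{1 - p}{1 - q}
$$
It is summed over all coding neurons and added to the reconstruction loss, weighted by sparsity_weight.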
End of explanation
"""
hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.sigmoid)
"""
Explanation: Note that the coding layer must output values from 0 to 1, which is why we use the sigmoid activation function:
End of explanation
"""
logits = tf.layers.dense(hidden1, n_outputs)
outputs = tf.nn.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_mean(xentropy)
"""
Explanation: To speed up training, you can normalize the inputs between 0 and 1, and use the cross entropy instead of the MSE for the cost function:
End of explanation
"""
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
initializer = tf.contrib.layers.variance_scaling_initializer()
my_dense_layer = partial(
tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=initializer)
X = tf.placeholder(tf.float32, [None, n_inputs])
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3_mean = my_dense_layer(hidden2, n_hidden3, activation=None)
hidden3_sigma = my_dense_layer(hidden2, n_hidden3, activation=None)
noise = tf.random_normal(tf.shape(hidden3_sigma), dtype=tf.float32)
hidden3 = hidden3_mean + hidden3_sigma * noise
hidden4 = my_dense_layer(hidden3, n_hidden4)
hidden5 = my_dense_layer(hidden4, n_hidden5)
logits = my_dense_layer(hidden5, n_outputs, activation=None)
outputs = tf.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_sum(xentropy)
eps = 1e-10 # smoothing term to avoid computing log(0) which is NaN
latent_loss = 0.5 * tf.reduce_sum(
tf.square(hidden3_sigma) + tf.square(hidden3_mean)
- 1 - tf.log(eps + tf.square(hidden3_sigma)))
loss = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
n_epochs = 50
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="")
sys.stdout.flush()
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_val, reconstruction_loss_val, latent_loss_val = sess.run([loss, reconstruction_loss, latent_loss], feed_dict={X: X_batch})
print("\r{}".format(epoch), "Train total loss:", loss_val, "\tReconstruction loss:", reconstruction_loss_val, "\tLatent loss:", latent_loss_val)
saver.save(sess, "./my_model_variational.ckpt")
reset_graph()
from functools import partial
n_inputs = 28 * 28
n_hidden1 = 500
n_hidden2 = 500
n_hidden3 = 20 # codings
n_hidden4 = n_hidden2
n_hidden5 = n_hidden1
n_outputs = n_inputs
learning_rate = 0.001
initializer = tf.contrib.layers.variance_scaling_initializer()
my_dense_layer = partial(
tf.layers.dense,
activation=tf.nn.elu,
kernel_initializer=initializer)
X = tf.placeholder(tf.float32, [None, n_inputs])
hidden1 = my_dense_layer(X, n_hidden1)
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3_mean = my_dense_layer(hidden2, n_hidden3, activation=None)
hidden3_gamma = my_dense_layer(hidden2, n_hidden3, activation=None)
noise = tf.random_normal(tf.shape(hidden3_gamma), dtype=tf.float32)
hidden3 = hidden3_mean + tf.exp(0.5 * hidden3_gamma) * noise
hidden4 = my_dense_layer(hidden3, n_hidden4)
hidden5 = my_dense_layer(hidden4, n_hidden5)
logits = my_dense_layer(hidden5, n_outputs, activation=None)
outputs = tf.sigmoid(logits)
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=X, logits=logits)
reconstruction_loss = tf.reduce_sum(xentropy)
latent_loss = 0.5 * tf.reduce_sum(
tf.exp(hidden3_gamma) + tf.square(hidden3_mean) - 1 - hidden3_gamma)
loss = reconstruction_loss + latent_loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
"""
Explanation: Variational Autoencoder
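The latent loss in the first variant above is the closed-form KL divergence between the learned Gaussian codings (mean $\mu$, standard deviation $\sigma$) and the standard normal prior:
$$
\mathcal{L}_{\text{latent}} = \frac{1}{2} \sum_i \left( \sigma_i^2 + \mu_i^2 - 1 - \log(\sigma_i^2 + \epsilon) \right)
$$
where $\epsilon$ is the small smoothing term used in the code to avoid taking the log of zero.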
End of explanation
"""
import numpy as np
n_digits = 60
n_epochs = 50
batch_size = 150
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
n_batches = mnist.train.num_examples // batch_size
for iteration in range(n_batches):
print("\r{}%".format(100 * iteration // n_batches), end="") # not shown in the book
sys.stdout.flush() # not shown
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch})
loss_val, reconstruction_loss_val, latent_loss_val = sess.run([loss, reconstruction_loss, latent_loss], feed_dict={X: X_batch}) # not shown
print("\r{}".format(epoch), "Train total loss:", loss_val, "\tReconstruction loss:", reconstruction_loss_val, "\tLatent loss:", latent_loss_val) # not shown
saver.save(sess, "./my_model_variational.ckpt") # not shown
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
outputs_val = outputs.eval(feed_dict={hidden3: codings_rnd})
plt.figure(figsize=(8,50)) # not shown in the book
for iteration in range(n_digits):
plt.subplot(n_digits, 10, iteration + 1)
plot_image(outputs_val[iteration])
n_rows = 6
n_cols = 10
plot_multiple_images(outputs_val.reshape(-1, 28, 28), n_rows, n_cols)
save_fig("generated_digits_plot")
plt.show()
"""
Explanation: Generate digits
Let's train the model and generate a few random digits:
End of explanation
"""
latent_loss = 0.5 * tf.reduce_sum(
tf.exp(hidden3_gamma) + tf.square(hidden3_mean) - 1 - hidden3_gamma)
"""
Explanation: Note that the latent loss is computed differently in this second variant:
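With the reparameterization $\gamma = \log(\sigma^2)$, the same KL divergence simplifies to
$$
\mathcal{L}_{\text{latent}} = \frac{1}{2} \sum_i \left( e^{\gamma_i} + \mu_i^2 - 1 - \gamma_i \right)
$$
which avoids the explicit log and the smoothing term, and is exactly what the code computes from hidden3_gamma and hidden3_mean.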
End of explanation
"""
n_digits = 3
X_test, y_test = mnist.test.next_batch(batch_size)
codings = hidden3
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
codings_val = codings.eval(feed_dict={X: X_test})
"""
Explanation: Encode & Decode
Encode:
End of explanation
"""
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
outputs_val = outputs.eval(feed_dict={codings: codings_val})
"""
Explanation: Decode:
End of explanation
"""
fig = plt.figure(figsize=(8, 2.5 * n_digits))
for iteration in range(n_digits):
plt.subplot(n_digits, 2, 1 + 2 * iteration)
plot_image(X_test[iteration])
plt.subplot(n_digits, 2, 2 + 2 * iteration)
plot_image(outputs_val[iteration])
"""
Explanation: Let's plot the reconstructions:
End of explanation
"""
n_iterations = 3
n_digits = 6
codings_rnd = np.random.normal(size=[n_digits, n_hidden3])
with tf.Session() as sess:
saver.restore(sess, "./my_model_variational.ckpt")
target_codings = np.roll(codings_rnd, -1, axis=0)
for iteration in range(n_iterations + 1):
codings_interpolate = codings_rnd + (target_codings - codings_rnd) * iteration / n_iterations
outputs_val = outputs.eval(feed_dict={codings: codings_interpolate})
plt.figure(figsize=(11, 1.5*n_iterations))
for digit_index in range(n_digits):
plt.subplot(1, n_digits, digit_index + 1)
plot_image(outputs_val[digit_index])
plt.show()
"""
Explanation: Interpolate digits
End of explanation
"""
|
elektrobohemian/courses
|
ImageSimilarity_and_ClusterDemo.ipynb
|
mit
|
%matplotlib inline
import os
import tarfile as TAR
import sys
from datetime import datetime
from PIL import Image
import warnings
import json
import pickle
import zipfile
from math import *
import numpy as np
import pandas as pd
from sklearn.cluster import MiniBatchKMeans
import matplotlib.pyplot as plt
import matplotlib
# enlarge plots
plt.rcParams['figure.figsize'] = [7, 5]
import imagehash
from sklearn.preprocessing import normalize
from scipy.spatial.distance import minkowski
from scipy.spatial.distance import hamming
from scipy.stats import wasserstein_distance
from scipy.stats import spearmanr
from skimage.feature import (match_descriptors, corner_harris,
corner_peaks, ORB, plot_matches, BRIEF, corner_peaks, corner_harris)
from skimage.color import rgb2gray
from skimage.io import imread,imshow
def printLog(text):
now = str(datetime.now())
print("[" + now + "]\t" + text)
# forces to output the result of the print command immediately, see: http://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
sys.stdout.flush()
def findJPEGfiles(path):
# a list for the JPEG files
jpgFilePaths=[]
for root, dirs, files in os.walk(path):
for file_ in files:
if file_.endswith(".jpg"):
# debug
# print(os.path.join(root, file_))
jpgFilePaths.append(os.path.join(root, file_))
return jpgFilePaths
outputDir= "./analysis/"
verbose=True
#general preparations, e.g., create missing output directories
if not os.path.exists(outputDir):
if verbose:
print("Creating " + outputDir)
os.mkdir(outputDir)
"""
Explanation: Similarity Measures in Multimodal Retrieval
This tutorial assumes an Anaconda 3.x installation with Python 3.6.x. Missing libraries can be installed with conda or pip.
To start with prepared data, extract the attached file analysis.zip directly below the directory of this notebook.
End of explanation
"""
!pip install ImageHash
"""
Explanation: Most likely, the ImageHash library will be missing in a typical setup. The following cell installs the library.
End of explanation
"""
#baseDir="/Users/david/src/__datasets/orbis_pictus/sbbget_downloads_comenius/download_temp/"
baseDir="/Users/david/src/__datasets/orbis_pictus/jpegs/download_temp/"
jpgFiles=findJPEGfiles(baseDir)
# extract all features
printLog("Extracting features of %i documents..."%len(jpgFiles))
histograms=[]
# "data science" utility structures
ppnList=[]
nameList=[]
combinedHistograms=[]
combinedNormalizedHistograms=[]
jpegFilePaths=[]
pHashes=[]
for jpg in jpgFiles:
tokens=jpg.split("/")
# load an image
image = Image.open(jpg)
# bug: images are not of the same size, has to be fixed to obtain normalized histograms!!!
# q'n'd fix - brute force resizing
image=image.resize((512,512),Image.LANCZOS)
histogram = image.histogram()
histogramDict=dict()
# save its unique ID and name
histogramDict['ppn']=tokens[-3]+"/"+tokens[-2]
histogramDict['extractName']=tokens[-1]
# save the histogram data in various forms
histogramDict['redHistogram'] = histogram[0:256]
# PIL concatenates the per-band histograms in R, G, B order
histogramDict['greenHistogram'] = histogram[256:512]
histogramDict['blueHistogram'] = histogram[512:768]
hist=np.array(histogram)
normalizedRGB = (hist)/(max(hist))
# create a perceptual hash for the image
pHashes.append(imagehash.phash(image))
image.close()
# fill the DS data structures
ppnList.append(histogramDict['ppn'])
nameList.append(histogramDict['extractName'])
combinedHistograms.append(histogramDict['redHistogram']+histogramDict['blueHistogram']+histogramDict['greenHistogram'])
combinedNormalizedHistograms.append(normalizedRGB)
jpegFilePaths.append(jpg)
printLog("Done.")
"""
Explanation: Feature Extraction
In the next step, we have to find all images that we want to use in our retrieval scenario and extract the needed histogram-based and hash-based features.
End of explanation
"""
img1=imread(jpegFilePaths[0])
img2=imread(jpegFilePaths[1388])
img3=imread(jpegFilePaths[1389])
#Creates two subplots and unpacks the output array immediately
f, (ax1, ax2,ax3) = plt.subplots(1, 3, sharex='all', sharey='all')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
ax1.set_title("Image #0")
ax1.imshow(img1)
ax2.set_title("Image #1388")
ax2.imshow(img2)
ax3.set_title("Image #1389")
ax3.imshow(img3)
"""
Explanation: To check whether the file search has succeeded, we display some of the images found to get a feeling for the data we are going to deal with.
End of explanation
"""
plt.plot(combinedNormalizedHistograms[0],"r")
plt.plot(combinedNormalizedHistograms[1388],"g")
histCut=np.absolute((np.subtract(combinedNormalizedHistograms[0],combinedNormalizedHistograms[1388])))
print(np.sum(histCut))
plt.plot(histCut,"k--")
plt.title("Histogramm Difference (black) of Two Histograms")
plt.show()
"""
Explanation: Detecting Similar Documents
To get started, we will inspect the results of two fairly simple approaches: the difference and the intersection of two histograms.
Histogram Difference
The histogram difference is computed between the first two images displayed above. The dashed line illustrates the actual difference between the two images' histograms.
End of explanation
"""
plt.plot(combinedNormalizedHistograms[0],"r")
plt.plot(combinedNormalizedHistograms[1388],"g")
histCut=(np.minimum(combinedNormalizedHistograms[0],combinedNormalizedHistograms[1388]))
print(np.sum(histCut))
plt.plot(histCut,"k--")
plt.title("Histogramm Intersection (black) of Two Histograms")
plt.show()
"""
Explanation: Histogram Intersection
An alternative measure is the histogram intersection, which computes the amount of overlap between the two histograms.
End of explanation
"""
qbeIndex=1392  # alternatively 1685; with 1685, p1 and p2 are identical at the beginning
img1=imread(jpegFilePaths[qbeIndex])
#plt.title(jpegFilePaths[qbeIndex])
plt.axis('off')
imshow(img1)
"""
Explanation: Comparison of Different Similarity Measures and Metrics in a QBE Scenario
To compare the effectiveness of different similarity computations, we will use a query by example (QBE) scenario in which we check a query image (#1392) against all other images in the corpus to find the most similar ones. The retrieved images will be displayed in the form of a list ordered by relevance.
End of explanation
"""
def squareRooted(x):
return round(sqrt(sum([a*a for a in x])),3)
def cosSimilarity(x,y):
numerator = sum(a*b for a,b in zip(x,y))
denominator = squareRooted(x)*squareRooted(y)
return round(numerator/float(denominator),3)
printLog("Calculating QBE scenarios...")
qbeHist=combinedNormalizedHistograms[qbeIndex]
dataDict={"index":[],"p1":[],"p2":[],"histdiff":[],"histcut":[],"emd":[],"cosine":[],"phash":[]}
for i,hist in enumerate(combinedNormalizedHistograms):
dataDict["index"].append(i)
# Manhattan distance
dataDict["p1"].append(minkowski(qbeHist,hist,p=1))
# Euclidean distance
dataDict["p2"].append(minkowski(qbeHist,hist,p=2))
# histogram difference
histDiff=np.absolute((np.subtract(qbeHist,combinedNormalizedHistograms[i])))
dataDict["histdiff"].append(np.sum(histDiff))
# histogram cut
histCut=np.minimum(qbeHist,combinedNormalizedHistograms[i])
dataDict["histcut"].append(np.sum(histCut))
# earth mover's distance aka Wasserstein
dataDict["emd"].append(wasserstein_distance(qbeHist,hist))
# cosine similarity
dataDict["cosine"].append(cosSimilarity(qbeHist,combinedNormalizedHistograms[i]))
# pHash with Hamming distance
dataDict["phash"].append(hamming(pHashes[qbeIndex],pHashes[i]))
df=pd.DataFrame(dataDict)
printLog("Done.")
"""
Explanation: The next cell computes different measures and metrics and saves them in a dataframe.
End of explanation
"""
df.sort_values(by=['p1']).head(20).describe()
"""
Explanation: If we inspect the dataframe, we will see that each measure/metric yields different results, which is not very surprising...
End of explanation
"""
measures=["p1","p2","histdiff","histcut","emd","cosine","phash"]
ranks=dict()
printLog("Creating QBE report files...")
htmlFile=open(outputDir+"_qbe.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_qbe.html")
htmlFile.write("<html><head>\n")
htmlFile.write("<link href='../css/helvetica.css' rel='stylesheet' type='text/css'>\n")
#htmlFile.write("<style>body {color: black;text-align: center; font-family: helvetica;} h1 {font-size:15px;position: fixed; padding-top:5px; top: 0;width: 100%;background: rgba(255,255,255,0.5);} h2 {font-size:15px;position: fixed; right: 0;width: 150px; padding-top:25px; padding-right:15px; background: rgba(255,255,255,0.5);} p {font-size:10px;} .score{font-size:6px; text-align: right;}")
htmlFile.write("</style></head>\n")
htmlFile.write("<h2>mir comparison.</h2>")
htmlFile.write("<table><tr>\n")
for measureName in measures:
typeOfMeasure="distance"
# check whether the measure is a similarity or a distance measure
# (assuming identity (i.e., identity of indiscernibles) of the measure)
if df[df.index==qbeIndex][measureName].tolist()[0]>0:
df2=df.sort_values(by=[measureName],ascending=False).head(20)
typeOfMeasure="similarity"
else:
df2=df.sort_values(by=[measureName],ascending=True).head(20)
typeOfMeasure="distance"
htmlFile.write("<td>\n")
measureTitle=measureName
if typeOfMeasure=="similarity":
measureTitle=measureName.replace("dist_","sim_")
htmlFile.write("<h1>"+measureTitle+"</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
ranks[measureName]=df2.index.tolist()
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for row in df2.itertuples(index=False):
i=row.index
score=getattr(row, measureName)
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
#htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
# close the HTML file
htmlFile.write("</tr></table>\n")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
"""
Explanation: To facilitate the assessment of the effectiveness of the different measures and metrics, the next cell creates an HTML overview document with the first found documents.
Sample results are available here.
End of explanation
"""
qbeIndexLocalFeat=17#qbeIndex#17 #17=Welt
img1=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)
img2=imread(jpegFilePaths[1301],as_gray=True)
img3=imread(jpegFilePaths[1671],as_gray=True)
#Creates two subplots and unpacks the output array immediately
f, (ax1, ax2,ax3) = plt.subplots(1, 3, sharex='all', sharey='all')
ax1.axis('off')
ax2.axis('off')
ax3.axis('off')
ax1.set_title("Query #%i"%qbeIndexLocalFeat)
ax1.imshow(img1)
ax2.set_title("Index #1301")
ax2.imshow(img2)
ax3.set_title("Index #1671")
ax3.imshow(img3)
"""
Explanation: A Local Feature - ORB
The local ORB (Oriented FAST and Rotated BRIEF) feature takes interesting regions of an image into account - the so-called keypoints. In contrast to the approaches presented so far, which consider the whole image at a time and are therefore called global features, local feature extractors search for keypoints and try to match them with the ones found in another image.
Hypothetically speaking, such features should help to discover similar details in different images no matter how the images differ in scale or rotation. Hence, ORB is considered relatively scale and rotation invariant.
In this section, we will investigate whether ORB can be used to find the pages in the Orbis Pictus describing the concept of the "world", which are present in three editions of the book as displayed below.
End of explanation
"""
# extract features
descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
# match features
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
# visualize the results
fig, ax = plt.subplots(nrows=1, ncols=1)
plt.gray()
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
ax.axis('off')
ax.set_title("Image 1 vs. Image 2")
"""
Explanation: To give an example, we will extract ORB features from the first two images and match them. The discovered matches will be illustrated below.
End of explanation
"""
printLog("Calculating ORB QBE scenarios...")
#qbeIndexLocalFeat
# set up the ORB descriptor extractor
descriptor_extractor = ORB(n_keypoints=200)
# prepare QBE image
qbeImage=imread(jpegFilePaths[qbeIndexLocalFeat],as_gray=True)
descriptor_extractor.detect_and_extract(qbeImage)
qbeKeypoints = descriptor_extractor.keypoints
qbeDescriptors = descriptor_extractor.descriptors
orbDescriptors=[]
orbMatches=[]
# match QBE image against the corpus
dataDict={"index":[],"matches_orb":[]}
for i,jpeg in enumerate(jpegFilePaths):
dataDict["index"].append(i)
compImage=imread(jpeg,as_gray=True)
descriptor_extractor.detect_and_extract(compImage)
keypoints = descriptor_extractor.keypoints
descriptors = descriptor_extractor.descriptors
orbDescriptors.append(descriptors)
matches = match_descriptors(qbeDescriptors, descriptors, cross_check=True)#,max_distance=0.5)
orbMatches.append(matches)
# naive approach: count the number of matched descriptors
dataDict["matches_orb"].append(matches.shape[0])
if i%100==0:
printLog("Processed %i documents of %i."%(i,len(jpegFilePaths)))
df=pd.DataFrame(dataDict)
printLog("Done.")
df2=df.sort_values(by=['matches_orb'],ascending=False).head(20)
df2.describe()
"""
Explanation: ATTENTION! Depending on your computer setup, the next cell will take some time to finish. See the log below to get an estimate. The experiment has been run on a MacBook Pro (13-inch, 2018, 2.7 GHz Intel Core i7, 16 GB RAM, macOS Mojave).
In this naive approach, we will simply count the number of matches between the query image and each image in the corpus and use this value as a similarity score.
End of explanation
"""
printLog("Calculating Hamming distances for ORB features and calculating average distance...")
averageDistancePerImage=[]
for i,matches in enumerate(orbMatches):
# matches qbe
# matches[:, 0]
# matches document
# matches[:, 1]
qbeMatchIndices=matches[:, 0]
queryMatchIndices=matches[:, 1]
sumDistances=0.0
noMatches=len(qbeMatchIndices)
for j,qbeMatchIndex in enumerate(qbeMatchIndices):
sumDistances+=hamming(qbeDescriptors[qbeMatchIndex],orbDescriptors[i][queryMatchIndices[j]])
avgDistance=sumDistances/noMatches
averageDistancePerImage.append((avgDistance,i))
if i%100==0:
printLog("Processed %i documents of %i."%(i,len(orbMatches)))
averageDistancePerImage.sort(key=lambda tup: tup[0])
printLog("Done.\n")
# create the report files
measures=["matches_orb"]
ranks=dict()
printLog("Creating QBE ORB report files...")
htmlFile=open(outputDir+"_orb.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_orb.html")
htmlFile.write("<html><head>\n")
htmlFile.write("<link href='../css/helvetica.css' rel='stylesheet' type='text/css'>\n")
#htmlFile.write("<style>body {color: black;text-align: center; font-family: helvetica;} h1 {font-size:15px;position: fixed; padding-top:5px; top: 0;width: 100%;background: rgba(255,255,255,0.5);} h2 {font-size:15px;position: fixed; right: 0;width: 150px; padding-top:25px; padding-right:15px; background: rgba(255,255,255,0.5);} p {font-size:10px;} .score{font-size:6px; text-align: right;}")
htmlFile.write("</style></head>\n")
htmlFile.write("<h2>orb comparison.</h2>")
htmlFile.write("<table><tr>\n")
for measureName in measures:
typeOfMeasure="similarity"
htmlFile.write("<td>\n")
htmlFile.write("<h1>"+measureName+"</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
ranks[measureName]=df2.index.tolist()
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for row in df2.itertuples(index=False):
i=row.index
score=getattr(row, measureName)
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
#htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
# the non-naive approach using the average distance
htmlFile.write("<td>\n")
htmlFile.write("<h1>dist_avg_orb</h1>\n")
htmlFile.write("<p>"+typeOfMeasure+"</p>\n")
for (dist,index) in averageDistancePerImage[:20]:
typeOfMeasure="similarity"
jpegFilePathsReport=[]
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
i=index
score=dist
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePathsReport.append(outputDir+jpgPath)
if i==qbeIndex:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
else:
htmlFile.write("<img height=150 src='"+jpgPath+"' alt='"+str(i)+"'/>\n")
htmlFile.write("<p class='score'>"+str(score)+"</p>")
htmlFile.write("<p class='score'> </p>\n")
htmlFile.write("</td>\n")
#eof
# close the HTML file
htmlFile.write("</tr></table>\n")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
"""
Explanation: In a slightly more sophisticated approach, we compute the average descriptor distance over all matches for each query-image pair. This average distance serves as another relevance score.
Finally, an HTML report file is created to compare the results of both approaches.
Sample results are available here.
End of explanation
"""
printLog("Clustering...")
X=np.array(combinedHistograms)
numberOfClusters=20
kmeans = MiniBatchKMeans(n_clusters=numberOfClusters, random_state = 0, batch_size = 6)
kmeans=kmeans.fit(X)
printLog("Done.")
printLog("Creating report files...")
htmlFiles=[]
jpegFilePaths=[]
for i in range(0,numberOfClusters):
htmlFile=open(outputDir+str(i)+".html", "w")
htmlFile.write("<html><head><link href='../css/helvetica.css' rel='stylesheet' type='text/css'></head>\n<body>\n")
#htmlFile.write("<h1>Cluster "+str(i)+"</h1>\n")
htmlFile.write("<img src='"+str(i)+".png' width=200 />") # cluster center histogram will created below
htmlFiles.append(htmlFile)
# image directory must be relative to the directory of the html files
imgBaseDir="./extracted_images/"
for i, label in enumerate(kmeans.labels_):
# create JPEG copies if not available already
tiffImage=imgBaseDir+ppnList[i]+"/"+nameList[i]
jpgPath=tiffImage.replace(".tif",".jpg")
if not os.path.exists(outputDir+jpgPath):
image = Image.open(outputDir+tiffImage)
image.thumbnail((512,512))
image.save(outputDir+jpgPath)
image.close()
os.remove(outputDir+tiffImage)
jpegFilePaths.append(outputDir+jpgPath)
htmlFiles[label].write("<img height=200 src='"+jpgPath+"' alt='"+str(len(jpegFilePaths)-1)+"'/>\n")
# close the HTML files
for h in htmlFiles:
h.write("</body></html>\n")
h.close()
# create the summarization main HTML page
htmlFile = open(outputDir+"_main.html", "w")
printLog("HTML output will be saved to: %s"%outputDir+"_main.html")
htmlFile.write("<html><head><link href='../css/helvetica.css' rel='stylesheet' type='text/css'></head><body>\n")
htmlFile.write("<h2>cluster results.</h2>\n")
for i in range(0, numberOfClusters):
htmlFile.write("<iframe src='./"+str(i)+".html"+"' height=400 ><p>Long live Netscape!</p></iframe>")
htmlFile.write("</body></html>\n")
htmlFile.close()
printLog("Done.")
# save the cluster center histograms as images to assist the visualization
printLog("Rendering %i cluster center histograms..."%len(kmeans.cluster_centers_))
for j, histogram in enumerate(kmeans.cluster_centers_):
plt.figure(0)
# clean previous plots
plt.clf()
plt.title("Cluster %i"%j)
#red
for i in range(0, 256):
plt.bar(i, histogram[i],color='red', alpha=0.3)
# blue
for i in range(256, 512):
plt.bar(i-256, histogram[i], color='blue', alpha=0.3)
# green
for i in range(512, 768):
plt.bar(i-512, histogram[i], color='green', alpha=0.3)
#debug
#plt.show()
plt.savefig(outputDir+str(j)+".png")
printLog("Done.")
"""
Explanation: Histogram-based Clustering
Sample results are available here.
End of explanation
"""
|
statsmodels/statsmodels.github.io
|
v0.13.2/examples/notebooks/generated/recursive_ls.ipynb
|
bsd-3-clause
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from pandas_datareader.data import DataReader
np.set_printoptions(suppress=True)
"""
Explanation: Recursive least squares
Recursive least squares is an expanding window version of ordinary least squares. In addition to availability of regression coefficients computed recursively, the recursively computed residuals allow the construction of statistics to investigate parameter instability.
The RecursiveLS class allows computation of recursive residuals and computes CUSUM and CUSUM of squares statistics. Plotting these statistics along with reference lines denoting statistically significant deviations from the null hypothesis of stable parameters allows an easy visual indication of parameter stability.
Finally, the RecursiveLS model allows imposing linear restrictions on the parameter vectors, and can be constructed using the formula interface.
End of explanation
"""
print(sm.datasets.copper.DESCRLONG)
dta = sm.datasets.copper.load_pandas().data
dta.index = pd.date_range("1951-01-01", "1975-01-01", freq="AS")
endog = dta["WORLDCONSUMPTION"]
# To the regressors in the dataset, we add a column of ones for an intercept
exog = sm.add_constant(
dta[["COPPERPRICE", "INCOMEINDEX", "ALUMPRICE", "INVENTORYINDEX"]]
)
"""
Explanation: Example 1: Copper
We first consider parameter stability in the copper dataset (description below).
End of explanation
"""
mod = sm.RecursiveLS(endog, exog)
res = mod.fit()
print(res.summary())
"""
Explanation: First, construct and fit the model, and print a summary. Although the RLS model computes the regression parameters recursively, so there are as many estimates as there are datapoints, the summary table only presents the regression parameters estimated on the entire sample; except for small effects from initialization of the recursions, these estimates are equivalent to OLS estimates.
End of explanation
"""
print(res.recursive_coefficients.filtered[0])
res.plot_recursive_coefficient(range(mod.k_exog), alpha=None, figsize=(10, 6))
"""
Explanation: The recursive coefficients are available in the recursive_coefficients attribute. Alternatively, plots can be generated using the plot_recursive_coefficient method.
End of explanation
"""
print(res.cusum)
fig = res.plot_cusum()
"""
Explanation: The CUSUM statistic is available in the cusum attribute, but usually it is more convenient to visually check for parameter stability using the plot_cusum method. In the plot below, the CUSUM statistic does not move outside of the 5% significance bands, so we fail to reject the null hypothesis of stable parameters at the 5% level.
End of explanation
"""
res.plot_cusum_squares()
"""
Explanation: Another related statistic is the CUSUM of squares. It is available in the cusum_squares attribute, but it is similarly more convenient to check it visually, using the plot_cusum_squares method. In the plot below, the CUSUM of squares statistic does not move outside of the 5% significance bands, so we fail to reject the null hypothesis of stable parameters at the 5% level.
End of explanation
"""
start = "1959-12-01"
end = "2015-01-01"
m2 = DataReader("M2SL", "fred", start=start, end=end)
cpi = DataReader("CPIAUCSL", "fred", start=start, end=end)
def ewma(series, beta, n_window):
nobs = len(series)
scalar = (1 - beta) / (1 + beta)
ma = []
k = np.arange(n_window, 0, -1)
weights = np.r_[beta ** k, 1, beta ** k[::-1]]
for t in range(n_window, nobs - n_window):
window = series.iloc[t - n_window : t + n_window + 1].values
ma.append(scalar * np.sum(weights * window))
return pd.Series(ma, name=series.name, index=series.iloc[n_window:-n_window].index)
m2_ewma = ewma(np.log(m2["M2SL"].resample("QS").mean()).diff().iloc[1:], 0.95, 10 * 4)
cpi_ewma = ewma(
np.log(cpi["CPIAUCSL"].resample("QS").mean()).diff().iloc[1:], 0.95, 10 * 4
)
"""
Explanation: Example 2: Quantity theory of money
The quantity theory of money suggests that "a given change in the rate of change in the quantity of money induces ... an equal change in the rate of price inflation" (Lucas, 1980). Following Lucas, we examine the relationship between double-sided exponentially weighted moving averages of money growth and CPI inflation. Although Lucas found the relationship between these variables to be stable, more recently it appears that the relationship is unstable; see e.g. Sargent and Surico (2010).
End of explanation
"""
fig, ax = plt.subplots(figsize=(13, 3))
ax.plot(m2_ewma, label="M2 Growth (EWMA)")
ax.plot(cpi_ewma, label="CPI Inflation (EWMA)")
ax.legend()
endog = cpi_ewma
exog = sm.add_constant(m2_ewma)
exog.columns = ["const", "M2"]
mod = sm.RecursiveLS(endog, exog)
res = mod.fit()
print(res.summary())
res.plot_recursive_coefficient(1, alpha=None)
"""
Explanation: After constructing the moving averages using the $\beta = 0.95$ filter of Lucas (with a window of 10 years on either side), we plot each of the series below. Although they appear to move together for part of the sample, after 1990 they appear to diverge.
End of explanation
"""
res.plot_cusum()
"""
Explanation: The CUSUM plot now shows substantial deviation at the 5% level, suggesting a rejection of the null hypothesis of parameter stability.
End of explanation
"""
res.plot_cusum_squares()
"""
Explanation: Similarly, the CUSUM of squares shows substantial deviation at the 5% level, also suggesting a rejection of the null hypothesis of parameter stability.
End of explanation
"""
endog = dta["WORLDCONSUMPTION"]
exog = sm.add_constant(
dta[["COPPERPRICE", "INCOMEINDEX", "ALUMPRICE", "INVENTORYINDEX"]]
)
mod = sm.RecursiveLS(endog, exog, constraints="COPPERPRICE = ALUMPRICE")
res = mod.fit()
print(res.summary())
"""
Explanation: Example 3: Linear restrictions and formulas
Linear restrictions
It is not hard to implement linear restrictions, using the constraints parameter in constructing the model.
End of explanation
"""
mod = sm.RecursiveLS.from_formula(
"WORLDCONSUMPTION ~ COPPERPRICE + INCOMEINDEX + ALUMPRICE + INVENTORYINDEX",
dta,
constraints="COPPERPRICE = ALUMPRICE",
)
res = mod.fit()
print(res.summary())
"""
Explanation: Formula
One could fit the same model using the class method from_formula.
End of explanation
"""
|
sdaros/placeword
|
build_wordlist.ipynb
|
unlicense
|
wordlists = []
"""
Explanation: Importing our wordlists
Here we import all of our wordlists and add them to an array which we can merge at the end.
These wordlists should not be filtered at this point. However, they should all contain the same columns to make merging easier later.
End of explanation
"""
!head -n 20 de-en.txt
"""
Explanation: Dictcc
Download the dictionary from http://www.dict.cc/?s=about%3Awordlist
Print out the first 20 lines of the dictionary
End of explanation
"""
import pandas as pd
dictcc_df = pd.read_csv("de-en.txt",
sep='\t',
skiprows=8,
header=None,
names=["GermanWord","Word","WordType"])
"""
Explanation: Use the pandas library to import the CSV file
End of explanation
"""
dictcc_df[90:100]
"""
Explanation: Preview a few entries of the wordlist
End of explanation
"""
dictcc_df = dictcc_df[["Word", "WordType"]][:].copy()
"""
Explanation: We only need the "Word" and "WordType" columns
End of explanation
"""
word_types = dictcc_df["WordType"].astype('category')
dictcc_df["WordType"] = word_types
# show data types of each column in the dataframe
dictcc_df.dtypes
"""
Explanation: Convert WordType Column to a pandas.Categorical
End of explanation
"""
# nltk TaggedCorpusParses requires uppercase WordType
dictcc_df["WordType"] = dictcc_df["WordType"].str.upper()
dictcc_df["WordType"].value_counts().head()
"""
Explanation: List the current distribution of word types in dictcc dataframe
End of explanation
"""
wordlists.append(dictcc_df)
"""
Explanation: Add dictcc corpus to our wordlists array
End of explanation
"""
# the readme file in `nltk/corpora/moby/mpos` gives some information on how to parse the file
result = []
# replace all DOS line endings '\r' with newlines then change encoding to UTF8
moby_words = !cat nltk/corpora/moby/mpos/mobyposi.i | iconv --from-code=ISO88591 --to-code=UTF8 | tr -s '\r' '\n' | tr -s '×' '/'
result.extend(moby_words)
moby_df = pd.DataFrame(data = result, columns = ['Word'])
moby_df.tail(10)
"""
Explanation: Moby
Download the corpus from http://icon.shef.ac.uk/Moby/mpos.html
Perform some basic cleanup on the wordlist
End of explanation
"""
# Matches nouns
nouns = moby_df[moby_df["Word"].str.contains('/[Np]$')].copy()
nouns["WordType"] = "NOUN"
# Matches verbs
verbs = moby_df[moby_df["Word"].str.contains('/[Vti]$')].copy()
verbs["WordType"] = "VERB"
# Matches adjectives
adjectives = moby_df[moby_df["Word"].str.contains('/A$')].copy()
adjectives["WordType"] = "ADJ"
"""
Explanation: sort out the nouns, verbs and adjectives
End of explanation
"""
nouns["Word"] = nouns["Word"].str.replace(r'/N$','')
verbs["Word"] = verbs["Word"].str.replace(r'/[Vti]$','')
adjectives["Word"] = adjectives["Word"].str.replace(r'/A$','')
# Merge nouns, verbs and adjectives into one dataframe
moby_df = pd.concat([nouns,verbs,adjectives])
"""
Explanation: remove the trailing part-of-speech markers and concatenate the nouns, verbs and adjectives
End of explanation
"""
wordlists.append(moby_df)
"""
Explanation: Add moby corpus to wordlists array
End of explanation
"""
wordlist = pd.concat(wordlists)
"""
Explanation: Combine all wordlists
End of explanation
"""
wordlist_filtered = wordlist[wordlist["WordType"].notnull()]
"""
Explanation: Filter for results that we want
We want to remove words that aren't associated with a type (null WordType)
End of explanation
"""
# we choose [a-z] here and not [A-Za-z] because we do _not_
# want to match words starting with uppercase characters.
# ^to matches verbs in the infinitive from `dictcc`
word_chars = r'^[a-z]+$|^to\s'
is_word_chars = wordlist_filtered["Word"].str.contains(word_chars, na=False)
wordlist_filtered = wordlist_filtered[is_word_chars]
wordlist_filtered.describe()
wordlist_filtered["WordType"].value_counts()
"""
Explanation: We want to remove words that contain non-word characters (whitespace, hyphens, etc.)
End of explanation
"""
lt_x_letters = (wordlist_filtered["Word"].str.len() < 9) |\
((wordlist_filtered["Word"].str.contains('^to\s\w+\s')) &\
(wordlist_filtered["Word"].str.len() < 11)\
)
wordlist_filtered = wordlist_filtered[lt_x_letters]
wordlist_filtered.describe()
"""
Explanation: We want results that are less than 'x' letters long (x+3 for verbs since they are in their infinitive form in the dictcc wordlist)
End of explanation
"""
wordlist_filtered = wordlist_filtered.drop_duplicates("Word")
wordlist_filtered.describe()
wordlist_filtered["WordType"].value_counts()
"""
Explanation: We want to remove all duplicates
End of explanation
"""
# The TaggedCorpusReader likes to use the forward slash character '/'
# as seperator between the word and part-of-speech tag (WordType).
wordlist_filtered.to_csv("dictcc_moby.csv",index=False,sep="/",header=None)
from nltk.corpus import TaggedCorpusReader
from nltk.tokenize import WhitespaceTokenizer
nltk_wordlist = TaggedCorpusReader("./", "dictcc_moby.csv")
"""
Explanation: Load our wordlists into nltk
End of explanation
"""
# Our custom wordlist
import nltk
custom_cfd = nltk.ConditionalFreqDist((tag, word) for (word, tag) in nltk_wordlist.tagged_words() if len(word) < 9 and word.isalpha())
# Brown Corpus
import nltk
brown_cfd = nltk.ConditionalFreqDist((tag, word) for (word, tag) in nltk.corpus.brown.tagged_words() if word.isalpha() and len(word) < 9)
# Merge Nouns from all wordlists
nouns = set(brown_cfd["NN"]) | set(brown_cfd["NP"]) | set(custom_cfd["NOUN"])
# Lowercase all words to remove duplicates
nouns = set([noun.lower() for noun in nouns])
print("Total nouns count: " + str(len(nouns)))
# Merge Verbs from all wordlists
verbs = set(brown_cfd["VB"]) | set(brown_cfd["VBD"]) | set(custom_cfd["VERB"])
# Lowercase all words to remove duplicates
verbs = set([verb.lower() for verb in verbs])
print("Total verbs count: " + str(len(verbs)))
# Merge Adjectives from all wordlists
adjectives = set(brown_cfd["JJ"]) | set(custom_cfd["ADJ"])
# Lowercase all words to remove duplicates
adjectives = set([adjective.lower() for adjective in adjectives])
print("Total adjectives count: " + str(len(adjectives)))
"""
Explanation: NLTK
Use NLTK to help us merge our wordlists
End of explanation
"""
def populate_degrees(nouns):
degrees = {}
nouns_copy = nouns.copy()
for latitude in range(60):
for longtitude in range(190):
degrees[(latitude,longtitude)] = nouns_copy.pop()
return degrees
def populate_minutes(verbs):
minutes = {}
verbs_copy = verbs.copy()
for latitude in range(60):
for longtitude in range(60):
minutes[(latitude,longtitude)] = verbs_copy.pop()
return minutes
def populate_seconds(adjectives):
seconds = {}
adjectives_copy = adjectives.copy()
for latitude in range(60):
for longtitude in range(60):
seconds[(latitude,longtitude)] = adjectives_copy.pop()
return seconds
def populate_fractions(nouns):
fractions = {}
nouns_copy = nouns.copy()
for latitude in range(10):
for longtitude in range(10):
fractions[(latitude,longtitude)] = nouns_copy.pop()
return fractions
def placewords(degrees,minutes,seconds,fractions):
result = []
result.append(populate_degrees(nouns).get(degrees))
result.append(populate_minutes(verbs).get(minutes))
result.append(populate_seconds(adjectives).get(seconds))
result.append(populate_fractions(nouns).get(fractions))
return "-".join(result)
# Located at 50°40'47.9" N 10°55'55.2" E
ilmenau_home = placewords((50,10),(40,55),(47,55),(9,2))
print("Feel free to stalk me at " + ilmenau_home)
"""
Explanation: Make Some Placewords Magic Happen
End of explanation
"""
|
cuttlefishh/emp
|
code/10-sequence-lookup/trading-card-latex/blast_xml_to_taxonomy.ipynb
|
bsd-3-clause
|
import pandas as pd
import numpy as np
import Bio.Blast.NCBIXML
from cStringIO import StringIO
from __future__ import print_function
# convert RDP-style lineage to Greengenes-style lineage
def rdp_lineage_to_gg(lineage):
d = {}
linlist = lineage.split(';')
for i in np.arange(0, len(linlist), 2):
d[linlist[i+1]] = linlist[i]
linstr = ''
for level in ['domain', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus']:
try:
linstr += level[0] + '__' + d[level].replace('"', '') + '; '
except:
linstr += level[0] + '__' + '; '
linstr = linstr[:-2]
return(linstr)
# parse blast xml record
def parse_record_alignments_taxonomy(record):
df = pd.DataFrame(columns=('strain', 'lineage'))
for alignment in record.alignments:
strain, lineage = alignment.hit_def.split(' ')
linstr = rdp_lineage_to_gg(lineage)
df = df.append({'strain': strain, 'lineage': linstr}, ignore_index=True)
df['species'] = [(x.split(' ')[0] + ' ' + x.split(' ')[1]).replace(';', '') for x in df.strain]
num_hits = df.shape[0]
vc_species = df.species.value_counts()
vc_lineage = df.lineage.value_counts()
return(num_hits, vc_species, vc_lineage)
# main function
def xml_to_taxonomy(path_xml, path_output):
# read file as single string, generate handle, and parse xml handle to records generator
with open(path_xml) as file:
str_xml = file.read()
handle_xml = StringIO(str_xml)
records = Bio.Blast.NCBIXML.parse(handle_xml)
# write top lineage and top 3 strains for each query
with open(path_output, 'w') as target:
# write header
target.write('query\tlineage_count\tspecies_1st_count\tspecies_2nd_count\tspecies_3rd_count\n')
# iterate over records generator
for record in records:
target.write('%s' % record.query)
try:
num_hits, vc_species, vc_lineage = parse_record_alignments_taxonomy(record)
except:
pass
try:
target.write('\t%s (%s/%s)' % (vc_lineage.index[0], vc_lineage[0], num_hits))
except:
pass
try:
target.write('\t%s (%s/%s)' % (vc_species.index[0], vc_species[0], num_hits))
except:
pass
try:
target.write('\t%s (%s/%s)' % (vc_species.index[1], vc_species[1], num_hits))
except:
pass
try:
target.write('\t%s (%s/%s)' % (vc_species.index[2], vc_species[2], num_hits))
except:
pass
target.write('\n')
"""
Explanation: author: lukethompson@gmail.com<br>
date: 16 Nov 2016<br>
language: Python 2.7<br>
license: BSD3<br>
blast_xml_to_taxonomy.ipynb
Takes the XML output of blastn (query: Deblur OTU, database: RDP Release 11, percent ID: 100%), parses it, and creates a file with the query, top RDP lineage (with number of hits having that lineage over total hits), and top-3 RDP species (with number of hits having that species over total hits).
End of explanation
"""
path_xml = '../../data/sequence-lookup/rdp-taxonomy/otu_seqs_top_500_prev.emp_deblur_90bp.subset_2k.rare_5000.xml'
path_output = 'otu_seqs_top_500_prev.emp_deblur_90bp.subset_2k.rare_5000.tsv'
xml_to_taxonomy(path_xml, path_output)
"""
Explanation: Run for 90-bp sequences (top 500 by prevalence in 90-bp biom table)
End of explanation
"""
path_xml = '../../data/sequence-lookup/rdp-taxonomy/otu_seqs_top_500_prev.emp_deblur_100bp.subset_2k.rare_5000.xml'
path_output = 'otu_seqs_top_500_prev.emp_deblur_100bp.subset_2k.rare_5000.tsv'
xml_to_taxonomy(path_xml, path_output)
"""
Explanation: Run for 100-bp sequences (top 500 by prevalence in 100-bp biom table)
End of explanation
"""
path_xml = '../../data/sequence-lookup/rdp-taxonomy/otu_seqs_top_500_prev.emp_deblur_150bp.subset_2k.rare_5000.xml'
path_output = 'otu_seqs_top_500_prev.emp_deblur_150bp.subset_2k.rare_5000.tsv'
xml_to_taxonomy(path_xml, path_output)
"""
Explanation: Run for 150-bp sequences (top 500 by prevalence in 150-bp biom table)
End of explanation
"""
|
uber/pyro
|
tutorial/source/contrib_funsor_intro_ii.ipynb
|
apache-2.0
|
from collections import OrderedDict
import functools
import torch
from torch.distributions import constraints
import funsor
from pyro import set_rng_seed as pyro_set_rng_seed
from pyro.ops.indexing import Vindex
from pyro.poutine.messenger import Messenger
funsor.set_backend("torch")
torch.set_default_dtype(torch.float32)
pyro_set_rng_seed(101)
"""
Explanation: pyro.contrib.funsor, a new backend for Pyro - Building inference algorithms (Part 2)
End of explanation
"""
import pyro.contrib.funsor
import pyroapi
from pyroapi import infer, handlers, ops, optim, pyro
from pyroapi import distributions as dist
"""
Explanation: Introduction
In part 1 of this tutorial, we were introduced to the new pyro.contrib.funsor backend for Pyro.
Here we'll look at how to use the components in pyro.contrib.funsor to implement a variable elimination inference algorithm from scratch. This tutorial assumes readers are familiar with enumeration-based inference algorithms in Pyro. For background and motivation, readers should consult the enumeration tutorial.
As before, we'll use pyroapi so that we can write our model with standard Pyro syntax.
End of explanation
"""
data = [torch.tensor(1.)] * 10
def model(data, verbose):
p = pyro.param("probs", lambda: torch.rand((3, 3)), constraint=constraints.simplex)
locs_mean = pyro.param("locs_mean", lambda: torch.ones((3,)))
locs = pyro.sample("locs", dist.Normal(locs_mean, 1.).to_event(1))
if verbose:
print("locs.shape = {}".format(locs.shape))
x = 0
for i in pyro.markov(range(len(data))):
x = pyro.sample("x{}".format(i), dist.Categorical(p[x]), infer={"enumerate": "parallel"})
if verbose:
print("x{}.shape = ".format(i), x.shape)
pyro.sample("y{}".format(i), dist.Normal(Vindex(locs)[..., x], 1.), obs=data[i])
"""
Explanation: We will be working with the following model throughout. It is a discrete-state continuous-observation hidden Markov model with learnable transition and emission distributions that depend on a global random variable.
End of explanation
"""
# default backend: "pyro"
with pyroapi.pyro_backend("pyro"):
model(data, verbose=True)
# new backend: "contrib.funsor"
with pyroapi.pyro_backend("contrib.funsor"):
model(data, verbose=True)
"""
Explanation: We can run model under the default Pyro backend and the new contrib.funsor backend with pyroapi:
End of explanation
"""
from pyro.contrib.funsor.handlers.named_messenger import NamedMessenger
class EnumMessenger(NamedMessenger):
@pyroapi.pyro_backend("contrib.funsor") # necessary since we invoke pyro.to_data and pyro.to_funsor
def _pyro_sample(self, msg):
if msg["done"] or msg["is_observed"] or msg["infer"].get("enumerate") != "parallel":
return
# We first compute a raw value using the standard enumerate_support method.
# enumerate_support returns a value of shape:
# (support_size,) + (1,) * len(msg["fn"].batch_shape).
raw_value = msg["fn"].enumerate_support(expand=False)
# Next we'll use pyro.to_funsor to indicate that this dimension is fresh.
# This is guaranteed because we use msg['name'], the name of this pyro.sample site,
# as the name for this positional dimension, and sample site names must be unique.
funsor_value = pyro.to_funsor(
raw_value,
output=funsor.Bint[raw_value.shape[0]],
dim_to_name={-raw_value.dim(): msg["name"]},
)
# Finally, we convert the value back to a PyTorch tensor with to_data,
# which has the effect of reshaping and possibly permuting dimensions of raw_value.
# Applying to_funsor and to_data in this way guarantees that
# each enumerated random variable gets a unique fresh positional dimension
# and that we can convert the model's log-probability tensors to funsor.Tensors
# in a globally consistent manner.
msg["value"] = pyro.to_data(funsor_value)
msg["done"] = True
"""
Explanation: Enumerating discrete variables
Our first step is to implement an effect handler that performs parallel enumeration of discrete latent variables. Here we will implement a stripped-down version of pyro.poutine.enum, the effect handler behind Pyro's most powerful general-purpose inference algorithms pyro.infer.TraceEnum_ELBO and pyro.infer.mcmc.HMC.
We'll do that by constructing a funsor.Tensor representing the support of each discrete latent variable and using the new pyro.to_data primitive from part 1 to convert it to a torch.Tensor with the appropriate shape.
End of explanation
"""
with pyroapi.pyro_backend("contrib.funsor"), \
EnumMessenger():
model(data, True)
"""
Explanation: Because this is an introductory tutorial, this implementation of EnumMessenger works directly with the site's PyTorch distribution since users familiar with PyTorch and Pyro may find it easier to understand. However, when using contrib.funsor to implement an inference algorithm in a more realistic setting, it is usually preferable to do as much computation as possible on funsors, as this tends to simplify complex indexing, broadcasting or shape manipulation logic.
For example, in EnumMessenger, we might instead call pyro.to_funsor on msg["fn"]:
```py
funsor_dist = pyro.to_funsor(msg["fn"], output=funsor.Real)(value=msg["name"])
# enumerate_support defined whenever isinstance(funsor_dist, funsor.distribution.Distribution)
funsor_value = funsor_dist.enumerate_support(expand=False)
raw_value = pyro.to_data(funsor_value)
```
Most of the more complete inference algorithms implemented in pyro.contrib.funsor follow this pattern, and we will see an example later in this tutorial. Before we continue, let's see what effect EnumMessenger has on the shapes of random variables in our model:
End of explanation
"""
from pyro.contrib.funsor.handlers.named_messenger import GlobalNamedMessenger
from pyro.contrib.funsor.handlers.runtime import DimRequest, DimType
class VectorizeMessenger(GlobalNamedMessenger):
def __init__(self, size, name="_PARTICLES"):
super().__init__()
self.name = name
self.size = size
@pyroapi.pyro_backend("contrib.funsor")
def _pyro_sample(self, msg):
if msg["is_observed"] or msg["done"] or msg["infer"].get("enumerate") == "parallel":
return
# we'll first draw a raw batch of samples similarly to EnumMessenger.
# However, since we are drawing a single batch from the joint distribution,
# we don't need to take multiple samples if the site is already batched.
if self.name in pyro.to_funsor(msg["fn"], funsor.Real).inputs:
raw_value = msg["fn"].rsample()
else:
raw_value = msg["fn"].rsample(sample_shape=(self.size,))
# As before, we'll use pyro.to_funsor to register the new dimension.
# This time, we indicate that the particle dimension should be treated as a global dimension.
fresh_dim = len(msg["fn"].event_shape) - raw_value.dim()
funsor_value = pyro.to_funsor(
raw_value,
output=funsor.Reals[tuple(msg["fn"].event_shape)],
dim_to_name={fresh_dim: DimRequest(value=self.name, dim_type=DimType.GLOBAL)},
)
# finally, convert the sample to a PyTorch tensor using to_data as before
msg["value"] = pyro.to_data(funsor_value)
msg["done"] = True
"""
Explanation: Vectorizing a model across multiple samples
Next, since our priors over global variables are continuous and cannot be enumerated exactly, we will implement an effect handler that uses a global dimension to draw multiple samples in parallel from the model. Our implementation will allocate a new particle dimension using pyro.to_data as in EnumMessenger above, but unlike the enumeration dimensions, we want the particle dimension to be shared across all sample sites, so we will mark it as a DimType.GLOBAL dimension when invoking pyro.to_funsor.
Recall that in part 1 we saw that DimType.GLOBAL dimensions must be deallocated manually or they will persist until the final effect handler has exited. This low-level detail is taken care of automatically by the GlobalNamedMessenger handler provided in pyro.contrib.funsor as a base class for any effect handlers that allocate global dimensions. Our vectorization effect handler will inherit from this class.
End of explanation
"""
with pyroapi.pyro_backend("contrib.funsor"), \
VectorizeMessenger(size=10):
model(data, verbose=True)
"""
Explanation: Let's see what effect VectorizeMessenger has on the shapes of the values in model:
End of explanation
"""
with pyroapi.pyro_backend("contrib.funsor"), \
VectorizeMessenger(size=10), EnumMessenger():
model(data, verbose=True)
"""
Explanation: And now in combination with EnumMessenger:
End of explanation
"""
class LogJointMessenger(Messenger):
def __enter__(self):
self.log_joint = funsor.Number(0.)
return super().__enter__()
@pyroapi.pyro_backend("contrib.funsor")
def _pyro_post_sample(self, msg):
# for Monte Carlo-sampled variables, we don't include a log-density term:
if not msg["is_observed"] and not msg["infer"].get("enumerate"):
return
with funsor.interpreter.interpretation(funsor.terms.lazy):
funsor_dist = pyro.to_funsor(msg["fn"], output=funsor.Real)
funsor_value = pyro.to_funsor(msg["value"], output=funsor_dist.inputs["value"])
self.log_joint += funsor_dist(value=funsor_value)
"""
Explanation: Computing an ELBO with variable elimination
Now that we have tools for enumerating discrete variables and drawing batches of samples, we can use those to compute quantities of interest for inference algorithms.
Most inference algorithms in Pyro work with pyro.poutine.Traces, custom data structures that contain parameters and sample site distributions and values and all of the associated metadata needed for inference computations. Our third effect handler LogJointMessenger departs from this design pattern, eliminating a tremendous amount of boilerplate in the process. It will automatically build up a lazy Funsor expression for the logarithm of the joint probability density of a model; when working with Traces, this process must be triggered manually by calling Trace.compute_log_probs() and eagerly computing an objective from the resulting individual log-probability tensors in the trace.
In our implementation of LogJointMessenger, unlike the previous two effect handlers, we will call pyro.to_funsor on both the sample value and the distribution to show how nearly all inference operations including log-probability density evaluation can be performed on funsor.Funsors directly.
End of explanation
"""
@pyroapi.pyro_backend("contrib.funsor")
def log_z(model, model_args, size=10):
with LogJointMessenger() as tr, \
VectorizeMessenger(size=size) as v, \
EnumMessenger():
model(*model_args)
with funsor.interpreter.interpretation(funsor.terms.lazy):
prod_vars = frozenset({v.name})
sum_vars = frozenset(tr.log_joint.inputs) - prod_vars
# sum over the discrete random variables we enumerated
expr = tr.log_joint.reduce(funsor.ops.logaddexp, sum_vars)
# average over the sample dimension
expr = expr.reduce(funsor.ops.add, prod_vars) - funsor.Number(float(size))
return pyro.to_data(funsor.optimizer.apply_optimizer(expr))
"""
Explanation: And finally the actual loss function, which applies our three effect handlers to compute an expression for the log-density, marginalizes over discrete variables with funsor.ops.logaddexp, averages over Monte Carlo samples with funsor.ops.add, and evaluates the final lazy expression using Funsor's optimize interpretation for variable elimination.
Note that log_z exactly collapses the model's local discrete latent variables but is an ELBO wrt any continuous latent variables, and is thus equivalent to a simple version of TraceEnum_ELBO with an empty guide.
End of explanation
"""
with pyroapi.pyro_backend("contrib.funsor"):
model(data, verbose=False) # initialize parameters
params = [pyro.param("probs").unconstrained(), pyro.param("locs_mean").unconstrained()]
optimizer = torch.optim.Adam(params, lr=0.1)
for step in range(5):
optimizer.zero_grad()
log_marginal = log_z(model, (data, False))
(-log_marginal).backward()
optimizer.step()
print(log_marginal)
"""
Explanation: Putting it all together
Finally, with all this machinery implemented, we can compute stochastic gradients wrt the ELBO.
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/70d3a0e5dfbb415abf141d93f82df981/plot_55_setting_eeg_reference.ipynb
|
bsd-3-clause
|
import os
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
raw.pick(['EEG 0{:02}'.format(n) for n in range(41, 60)])
"""
Explanation: Setting the EEG reference
This tutorial describes how to set or change the EEG reference in MNE-Python.
As usual we'll start by importing the modules we need, loading some
example data <sample-dataset>, and cropping it to save memory. Since
this tutorial deals specifically with EEG, we'll also restrict the dataset to
just a few EEG channels so the plots are easier to see:
End of explanation
"""
# code lines below are commented out because the sample data doesn't have
# earlobe or mastoid channels, so this is just for demonstration purposes:
# use a single channel reference (left earlobe)
# raw.set_eeg_reference(ref_channels=['A1'])
# use average of mastoid channels as reference
# raw.set_eeg_reference(ref_channels=['M1', 'M2'])
"""
Explanation: Background
^^^^^^^^^^
EEG measures a voltage (difference in electric potential) between each
electrode and a reference electrode. This means that whatever signal is
present at the reference electrode is effectively subtracted from all the
measurement electrodes. Therefore, an ideal reference signal is one that
captures none of the brain-specific fluctuations in electric potential,
while capturing all of the environmental noise/interference that is being
picked up by the measurement electrodes.
In practice, this means that the reference electrode is often placed in a
location on the subject's body and close to their head (so that any
environmental interference affects the reference and measurement electrodes
similarly) but as far away from the neural sources as possible (so that the
reference signal doesn't pick up brain-based fluctuations). Typical reference
locations are the subject's earlobe, nose, mastoid process, or collarbone.
Each of these has advantages and disadvantages regarding how much brain
signal it picks up (e.g., the mastoids pick up a fair amount compared to the
others), and regarding the environmental noise it picks up (e.g., earlobe
electrodes may shift easily, and have signals more similar to electrodes on
the same side of the head).
Even in cases where no electrode is specifically designated as the reference,
EEG recording hardware will still treat one of the scalp electrodes as the
reference, and the recording software may or may not display it to you (it
might appear as a completely flat channel, or the software might subtract out
the average of all signals before displaying, making it look like there is
no reference).
Setting or changing the reference channel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you want to recompute your data with a different reference than was used
when the raw data were recorded and/or saved, MNE-Python provides the
:meth:~mne.io.Raw.set_eeg_reference method on :class:~mne.io.Raw objects
as well as the :func:mne.add_reference_channels function. To use an
existing channel as the new reference, use the
:meth:~mne.io.Raw.set_eeg_reference method; you can also designate multiple
existing electrodes as reference channels, as is sometimes done with mastoid
references:
End of explanation
"""
raw.plot()
"""
Explanation: If a scalp electrode was used as reference but was not saved alongside the
raw data (reference channels often aren't), you may wish to add it back to
the dataset before re-referencing. For example, if your EEG system recorded
with channel Fp1 as the reference but did not include Fp1 in the data
file, using :meth:~mne.io.Raw.set_eeg_reference to set (say) Cz as the
new reference will then subtract out the signal at Cz without restoring
the signal at Fp1. In this situation, you can add back Fp1 as a flat
channel prior to re-referencing using :func:~mne.add_reference_channels.
(Since our example data doesn't use the 10-20 electrode naming system, the
example below adds EEG 999 as the missing reference, then sets the
reference to EEG 050.) Here's how the data looks in its original state:
End of explanation
"""
# add new reference channel (all zero)
raw_new_ref = mne.add_reference_channels(raw, ref_channels=['EEG 999'])
raw_new_ref.plot()
"""
Explanation: By default, :func:~mne.add_reference_channels returns a copy, so we can go
back to our original raw object later. If you wanted to alter the
existing :class:~mne.io.Raw object in-place you could specify
copy=False.
End of explanation
"""
# set reference to `EEG 050`
raw_new_ref.set_eeg_reference(ref_channels=['EEG 050'])
raw_new_ref.plot()
"""
Explanation: .. KEEP THESE BLOCKS SEPARATE SO FIGURES ARE BIG ENOUGH TO READ
End of explanation
"""
# use the average of all channels as reference
raw_avg_ref = raw.copy().set_eeg_reference(ref_channels='average')
raw_avg_ref.plot()
"""
Explanation: Notice that the new reference (EEG 050) is now flat, while the original
reference channel that we added back to the data (EEG 999) has a non-zero
signal. Notice also that EEG 053 (which is marked as "bad" in
raw.info['bads']) is not affected by the re-referencing.
Setting average reference
^^^^^^^^^^^^^^^^^^^^^^^^^
To set a "virtual reference" that is the average of all channels, you can use
:meth:~mne.io.Raw.set_eeg_reference with ref_channels='average'. Just
as above, this will not affect any channels marked as "bad", nor will it
include bad channels when computing the average. However, it does modify the
:class:~mne.io.Raw object in-place, so we'll make a copy first so we can
still go back to the unmodified :class:~mne.io.Raw object later:
End of explanation
"""
raw.set_eeg_reference('average', projection=True)
print(raw.info['projs'])
"""
Explanation: Creating the average reference as a projector
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If using an average reference, it is possible to create the reference as a
:term:projector rather than subtracting the reference from the data
immediately by specifying projection=True:
End of explanation
"""
for title, proj in zip(['Original', 'Average'], [False, True]):
fig = raw.plot(proj=proj, n_channels=len(raw))
# make room for title
fig.subplots_adjust(top=0.9)
fig.suptitle('{} reference'.format(title), size='xx-large', weight='bold')
"""
Explanation: Creating the average reference as a projector has a few advantages:
It is possible to turn projectors on or off when plotting, so it is easy
to visualize the effect that the average reference has on the data.
If additional channels are marked as "bad" or if a subset of channels are
later selected, the projector will be re-computed to take these changes
into account (thus guaranteeing that the signal is zero-mean).
If there are other unapplied projectors affecting the EEG channels (such
as SSP projectors for removing heartbeat or blink artifacts), EEG
re-referencing cannot be performed until those projectors are either
applied or removed; adding the EEG reference as a projector is not subject
to that constraint. (The reason this wasn't a problem when we applied the
non-projector average reference to raw_avg_ref above is that the
empty-room projectors included in the sample data :file:.fif file were
only computed for the magnetometers.)
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.14/_downloads/plot_raw_objects.ipynb
|
bsd-3-clause
|
from __future__ import print_function
import mne
import os.path as op
from matplotlib import pyplot as plt
"""
Explanation: .. _tut_raw_objects
The :class:Raw <mne.io.RawFIF> data structure: continuous data
End of explanation
"""
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)
"""
Explanation: Continuous data is stored in objects of type :class:Raw <mne.io.RawFIF>.
The core data structure is simply a 2D numpy array (channels × samples,
._data) combined with an :class:Info <mne.io.meas_info.Info> object
(.info) (see :ref:tut_info_objects).
The most common way to load continuous data is from a .fif file. For more
information, see :ref:loading data from other formats <ch_raw> or
:ref:creating it from scratch <tut_creating_data_structures>.
Loading continuous data
End of explanation
"""
print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)
"""
Explanation: Information about the channels contained in the :class:Raw <mne.io.RawFIF>
object is contained in the :class:Info <mne.io.meas_info.Info> attribute.
This is essentially a dictionary with a number of relevant fields (see
:ref:tut_info_objects).
Indexing data
There are two ways to access the data stored within :class:Raw
<mne.io.RawFIF> objects. One is by accessing the underlying data array, and
the other is to index the :class:Raw <mne.io.RawFIF> object directly.
To access the data array of :class:Raw <mne.io.Raw> objects, use the
_data attribute. Note that this is only present if preload==True.
End of explanation
"""
# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')
"""
Explanation: You can also pass an index directly to the :class:Raw <mne.io.RawFIF>
object. This will return an array of times, as well as the data representing
those timepoints. This may be used even if the data is not preloaded:
End of explanation
"""
# Pull all MEG gradiometer channels:
# Make sure to use copy==True or it will overwrite the data
meg_only = raw.pick_types(meg=True, copy=True)
eeg_only = raw.pick_types(meg=False, eeg=True, copy=True)
# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.pick_types(meg='grad', copy=True)
# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.pick_channels(pick_chans, copy=True)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
"""
Explanation: Selecting subsets of channels and samples
It is possible to use more intelligent indexing to extract data, using
channel names, types or time ranges.
End of explanation
"""
f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
"""
Explanation: Notice the different scalings of these types
End of explanation
"""
restricted = raw.crop(5, 7) # in seconds
print('New time range from', restricted.times.min(), 's to',
restricted.times.max(), 's')
"""
Explanation: You can restrict the data to a specific time range
End of explanation
"""
restricted = restricted.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', raw.info['nchan'], 'to',
restricted.info['nchan'])
"""
Explanation: And drop channels by name
End of explanation
"""
# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 100)
# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
"""
Explanation: Concatenating :class:Raw <mne.io.RawFIF> objects
:class:Raw <mne.io.RawFIF> objects can be concatenated in time by using the
:func:append <mne.io.RawFIF.append> function. For this to work, they must
have the same number of channels and their :class:Info
<mne.io.meas_info.Info> structures should be compatible.
End of explanation
"""
|
yw-fang/readingnotes
|
machine-learning/handson_scikitlearn_tf_2017/ch01-notebook.ipynb
|
apache-2.0
|
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42) # I don't understand this line very much!
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "fundamentals"
# prepare a function to save figures
def save_fig(fig_id, tight_layout=True, dpi=300):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=dpi)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
"""
Explanation: Ch01
What is machine learning
There are several different definitions of ML.
For me, my favourite definition comes from Tom Mitchell (1997), phrased in an engineering-oriented way:
A computer program is said to learn from experience E with respect to some task T and some performance
measure P, if its performance on T, as measured by P, improves with experience E.
Why use machine learning
Machine learning is great for:
complex problems / fluctuating environments / large amounts of data / problems with no good solution using traditional methods
Types of Machine Learning Systems
We can classify them into broad categories based on:
whether or not they are trained with human supervision (supervised, unsupervised, semisupervised, reinforcement)
Supervised learning: training data includes the desired solutions, called labels. Spam filtering of email is a very good example of supervised learning: in the training set, every sample (an email) is labeled as spam or ham; these are the labels, and the end result of the process is that the mail system can classify emails on its own.
Another typical task is to predict a target numeric value, i.e. a regression problem. For example, predicting the price of a car: given a series of car features (mileage, age, brand, etc.), these features are called predictors. To reach the training goal, we need a dataset with many examples that contains not only the predictors but also the corresponding labels (here, the price).
Note the difference between attribute and feature: although many people use the two words interchangeably, strictly speaking, in machine learning an attribute is a data type, e.g. Mileage, whereas feature can have different meanings depending on the context; generally speaking, a feature is an attribute plus its value, e.g. Mileage = 15000.
Note the difference between feature and predictors: in my view, a predictor is always a feature, but a feature is not necessarily a predictor; a feature is only called a predictor when it is used for training.
Unsupervised learning: machine learning without labels.
Semisupervised learning: partially labeled training data
Reinforcement learning: it is a very different beast. The learning system, called an agent in this context,
can observe the environment, select and perform actions, and get rewards (or negative rewards, penalties). It must then learn by itself what is the best strategy, called a policy, to get the most reward over time. A policy defines what action the agent should choose when it is in a given situation.
whether or not they can learn incrementally on the fly (online versus batch learning)
batch learning: incapable of learning incrementally; offline learning
online learning: learning incrementally; note that the whole process is usually done offline (i.e., not on the live system), so online learning can be a confusing name. Think of it as incremental learning.
whether they work by simply comparing new data points to known data points, or instead detect patterns in the training data and build a predictive model, much like scientists do (instance-based versus model-based learning).
Example 1-1
Model-based learning
Make some preparations for the environment
End of explanation
"""
# Define a function
def prepare_country_stats(oecd_bli, gdp_per_capita):
oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
gdp_per_capita.set_index("Country", inplace=True)
full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
left_index=True, right_index=True)
full_country_stats.sort_values(by="GDP per capita", inplace=True)
remove_indices = [0, 1, 6, 8, 33, 34, 35]
keep_indices = list(set(range(36)) - set(remove_indices))
return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]
"""
Explanation: Author's Note
Author's note: This function just merges the OECD's life satisfaction data and the IMF's GDP per capita data. It's a bit too long and boring and it's not specific to Machine Learning, which is why I left it out of the book.
Prepare a function which merges the data into DataFrame
End of explanation
"""
# Code example
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import os
# Load the data. thousands: str, default None — the thousands separator, e.g. ',' or '.'
datapath = os.path.join("datasets", "lifesat", "")
oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',')
gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',
delimiter='\t',
encoding='latin1', na_values="n/a")
# Prepare the data
country_stats = prepare_country_stats(oecd_bli, gdp_per_capita)
# print(type(country_stats)) # country_stats is a DataFrame.
# print(country_stats.head(10))
xdata = np.c_[country_stats["GDP per capita"]]
# print(xdata[:10]) # xdata and ydata are NumPy column arrays.
ydata = np.c_[country_stats["Life satisfaction"]]
# print(ydata[:10])
# Visualize the data by scatter
country_stats.plot(kind='scatter', s=80, color='red',
x="GDP per capita", y = "Life satisfaction")
# Select a linear model
model = sklearn.linear_model.LinearRegression()
# Train the model
model.fit(xdata, ydata)
# Get the optimized parameters for the model: b is the intercept, k the slope
b, k = model.intercept_[0], model.coef_[0][0]
print(b, k)
"""
Explanation: I just tweaked the data files here to fetch the files in datasets/lifesat.
Plot the data points with scatters
End of explanation
"""
country_stats.plot(kind='scatter', s=80, color='red',
x="GDP per capita", y = "Life satisfaction")
# plot the best fit, k, b can be found in the output of previous cell
b = 4.853052800266436
k = 4.911544589158484E-5
x_tmp = np.linspace(0, 60000, 1000)
plt.plot(x_tmp, b + k*x_tmp, "blue")
# print(x_tmp)
"""
Explanation: Plot the best fit
End of explanation
"""
# Make a prediction for Cyprus
x_new = [[22587]] # Cyprus's GDP per capita
print("Life satisfaction of Cyprus is", model.predict(x_new))
"""
Explanation: Predict Life satisfaction for a new instance
End of explanation
"""
# Select a linear model
# from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KNeighborsRegressor
clf = KNeighborsRegressor(n_neighbors=3)
# Train the model
clf.fit(xdata, ydata)
# Make a prediction for Cyprus
x_new = [[22587]] # Cyprus's GDP per capita
print(clf.predict(x_new))
"""
Explanation: Apply instance-based learning to Example 1-1
In the following cell, I use a $K$-Nearest Neighbours regression model to train the data. It's a widely used instance-based learning algorithm.
In this example, I will use $K$ = 3.
End of explanation
"""
10**9.5
"""
Explanation: If it (instance-based learning) goes well, our model will make good predictions; however, if it doesn't work well, we need to use more attributes such as employment rate, health, air pollution, etc. In other words, we need to get more data of good quality, or perhaps select a more powerful model such as a Polynomial Regression model.
In summary, a machine learning project usually looks like this:
We studied the data
We selected a model
We trained it on the training data (i.e. the learning algorithm searched for the model parameter values that minimize a cost function).
Finally, we applied the model to make predictions on new cases (this is called $inference$), hoping that this model will generalize well.
The challenges of Machine Learning
The things that may turn your machine learning project into a mess are 'bad algorithms' and 'bad data'.
We usually need to achieve a tradeoff between spending money and time on data development and on algorithm development.
Data part
Insufficient quality of training data
Nonrepresentative training data
Check whether your dataset has sampling bias/sampling noise or not.
Poor-quality data
It's often well worth the effort to spend time cleaning up the training data. The truth is most data scientists spend a significant part of their time doing just that. For example:
If some instances are clearly outliers, it may help to simply discard them or try to fix the errors manually
If some instances are missing a few features (e.g., 50% of the customers did not specify their age), you must decide whether you want to ignore this attribute altogether, ignore these instances, fill in the missing values (e.g., with the median age), or train one model with the feature and one model without it, and so on.
Irrelevant Features
Including too many irrelevant features will influence machine learning results. Hence, feature engineering is quite important, and it usually involves
Feature selection: choose the most useful features to train on among the existing features
Feature extraction: combine existing features to produce a more useful one (dimensionality reduction algorithms can help)
Creating new features by gathering new data
Algorithm part
Overfitting the traning data
Overfitting: model performs well on the training data, but does not generalize well. This usually happens when the model is highly complex with respect to the amount and noisiness of the training data.
To solve this problem, we have some solutions that we can try:
To simplify the model
To get more good data
To reduce the noise in the existing training data (fix or remove some bad data)
Constraining a model to make it simpler and reduce the risk of overfitting: regularization.
To simplify a model, we can use another model instead, or we can simplify it by constraining it. The latter (constraining the model) is called regularization.
The amount of regularization to apply during learning can be controlled by a hyperparameter, which is set before training and will not be influenced by the learning algorithm.
Underfitting the traning data
Underfitting is the opposite of overfitting: it occurs when your model is too simple to learn the underlying structure of the data.
The options to fix underfitting:
select a more powerful model
feed better features
reduce the constraints on the model (e.g. reduce the regularization hyperparameter)
Stepping back
Let us step back and look at the big picture:
In a ML project, we feed the training set to a learning algorithm. If the algorithm is model-based it tunes some parameters to fit the model to the training set, and we will hope it be able to make good predictions on new cases as well. If the algorithm is instance-based, it just learns the examples by heart and uses a similarity measure to generalize to new instances.
Testing and validating
We train the model using the training set and test the model using the test set. The error rate on new cases is called the 'generalization error' or 'out-of-sample error', and by evaluating our model on the test set, we can estimate this error. This value indicates how well our model will perform on new instances.
In general, use 80% of the data for training and hold out 20% for testing.
However, sometimes we may find that even though the generalization error on the test set is quite low, the model still performs badly on new instances outside the dataset; the solution is to have a second holdout set called the 'validation set'.
To avoid 'wasting' too much training data in validation sets, a common technique is to use cross-validation: the training set is split into complementary subsets, and each model is trained against a different combination of these subsets and validated against the remaining parts. Once the model type and hyperparameters have been selected, a final model is trained using these hyperparameters on the full training set, and the generalization error is measured on the test set.
-------------No Free Lunch Theorem-------------------
A model is a simplified version of the observations. In the famous paper 'The Lack of A Priori Distinctions Between Learning Algorithms' by D. Wolpert in 1996, it was demonstrated that if you make absolutely no assumptions about the data, then there is no reason to prefer one model over any other. This is called the 'No Free Lunch Theorem'.
End of explanation
"""
%matplotlib inline
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
mpl.rcParams['figure.facecolor'] = '1'
# if you choose a grey background, use 0.75
mpl.rcParams['figure.figsize'] = [10,8/1.618]
mpl.rcParams['lines.linewidth'] = 2.2
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.fontsize'] = 19
mpl.rcParams['legend.scatterpoints'] = 1 #scatterpoints,
mpl.rcParams["axes.formatter.useoffset"]=False #turn off the axis offset-values.
# If on, the axis label will use an offset value beside the axis
mpl.rcParams["axes.linewidth"] = 2.5 # change the border width
#plt.rcParams["axes.edgecolor"] = "0.15" # change the border color
ticklabel_size = 22
mpl.rcParams['xtick.labelsize'] = ticklabel_size
mpl.rcParams['ytick.labelsize'] = ticklabel_size
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame
import sklearn
########################Load data########################
# Define a function used for merging data
def prepare_country_stats(oecd_bli, gdp_per_capita):
oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
gdp_per_capita.set_index("Country", inplace=True)
full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
left_index=True, right_index=True)
full_country_stats.sort_values(by="GDP per capita", inplace=True)
return full_country_stats[["GDP per capita", 'Life satisfaction']]
# remove_indices = [0, 1, 6, 8, 33, 34, 35]
# keep_indices = list(set(range(36)) - set(remove_indices))
# return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]
remove_indices = [0, 1, 6, 8, 33, 34, 35]
keep_indices = list(set(range(36)) - set(remove_indices))
datapath = os.path.join("datasets", "lifesat", "")
oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',')
gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',
delimiter='\t',
encoding='latin1', na_values="n/a")
# Prepare the data
print(keep_indices)
full_data = prepare_country_stats(oecd_bli, gdp_per_capita)
# divide the data into two parts; I will compare learning on the sample data vs. all the data
sample_data = full_data.iloc[keep_indices]
supplementary_data = full_data.iloc[remove_indices]
print(type(sample_data))
#################training#########################
## Method 1: use sample_data for training, use LinearRegression algorithm
#choose data
xdata_sample = np.c_[sample_data['GDP per capita']]
ydata_sample = np.c_[sample_data['Life satisfaction']]
#choose model
linear_model1 = sklearn.linear_model.LinearRegression()
# train the model
linear_model1.fit(xdata_sample, ydata_sample)
# Get the optimized parameters for the model
b1, k1 = linear_model1.intercept_[0], linear_model1.coef_[0][0]
print(k1,b1)
## Method 2: use full_data for training, use LinearRegression algorithm
# choose data
xdata_full = np.c_[full_data['GDP per capita']]
ydata_full = np.c_[full_data['Life satisfaction']]
# choose model, use the same model as method 1
linear_model2 = sklearn.linear_model.LinearRegression()
# train the model
linear_model2.fit(xdata_full, ydata_full)
# Get the optimized parameters for the model
b2, k2 = linear_model2.intercept_[0], linear_model2.coef_[0][0]
print(k2,b2)
## Method 3: use sample_data for training, perform "Regularization" to reduce the risk of overfitting
# choose data, here, I use the data as Method 1
# choose model, use the Ridge regularization method
ridge_model = sklearn.linear_model.Ridge(alpha=10**9.5)
# train the model
ridge_model.fit(xdata_sample, ydata_sample)
b3, k3 = ridge_model.intercept_[0], ridge_model.coef_[0][0]
## Method 3-1:
ridge_model_1 = sklearn.linear_model.Ridge(alpha=20**9.5)
ridge_model_1.fit(xdata_sample, ydata_sample)
b4, k4 = ridge_model_1.intercept_[0], ridge_model_1.coef_[0][0]
#################Ploting part#####################
fig1 = plt.figure()
ax1=fig1.add_subplot(1,1,1)
# plot all the data using scatters
ax1.plot(sample_data['GDP per capita'], sample_data['Life satisfaction'], 'o',color='red',markersize=15,
label='sample dataset')
ax1.plot(supplementary_data['GDP per capita'], supplementary_data['Life satisfaction'], '<', color='blue',
markersize=15, label='supplementary dataset')
# plot the fitting by Method 1
# XX = np.linspace(0, 120000, 1000)
XX = np.linspace(0, 110000, 1000)
ax1.plot(XX, k1*XX+b1, "b-", label='Linear model on sample data')
# plot the fitting by Method 2
ax1.plot(XX, k2*XX+b2, "b--", label='Linear model on all the data')
# plot the fitting by Method 3: apply the L2 regularization
ax1.plot(XX, k3*XX+b3, "-", color='orange', label="Regularization on sample data")
# plot the fitting by Method 3-1: apply the L2 regularization, but with a very large value of alpha
ax1.plot(XX, k4*XX+b4, "--", color='orange', label=r"Regularization on sample data with larger $\alpha$")
ax1.legend(loc='best')
plt.show()
"""
Explanation: Example code of using Regularization to reduce the risk of overfitting
End of explanation
"""
|
mrcinv/matpy
|
03c_bisekcija.ipynb
|
gpl-2.0
|
f = lambda x: x-2**(-x)
a,b=(0,1) # initial interval
(f(a),f(b))
"""
Explanation: ^ up: Introduction
Solving equations with bisection
Every equation $l(x)=d(x)$ can be reduced to finding a zero of the function
$$f(x)=l(x)-d(x)=0.$$
A zero of a continuous function can be found reliably with bisection. The idea is simple. If the values of the function at the endpoints of the interval $[a,b]$ have different signs, then a zero certainly lies inside the interval $(a,b)$ (an $x\in(a,b)$ for which $f(x)=0$).
Say $f(a)>0$ and $f(b)<0$. If we evaluate the function at the midpoint of the interval, $c=\frac{1}{2}(a+b)$, we can halve the interval that is known to contain the zero.
If $f(c)=0$, we have already found the zero and can stop searching.
If $f(c)<0$, then the zero certainly lies in the interval $(a,c)$,
and if $f(c)>0$, the zero certainly lies in the interval $(c,b)$.
If we repeat this procedure long enough, the interval containing the zero can be made arbitrarily small.
Example
Solve the equation
$$x=2^{-x}.$$
Solution
The equation can be reduced to finding a zero of the function
$$f(x) = x-2^{-x}.$$
First we look for an interval that we are sure contains a zero, i.e. two values of $x$ for which $f(x)$ has opposite signs.
End of explanation
"""
def bisekcija(f,a,b,n):
"""bisekcija(f,a,b,n) z bisekcijo izračuna interval širine (b-a)/2**n na katerem leži ničla funkcije f."""
if n<=0:
return (a,b)
fa, fb = (f(a),f(b))
    assert (fa*fb)<=0, "The signs at the endpoints of the interval [%f,%f] are the same" % (a,b)
    c = (a+b)/2 # midpoint of the interval
fc = f(c)
if fc == 0:
return (c,c)
elif fc*fa<=0:
return bisekcija(f,a,c,n-1)
else:
return bisekcija(f,c,b,n-1)
a,b = (0,1)
# 10 steps of bisection
for i in range(10):
print(bisekcija(f,a,b,i))
"""
Explanation: We see that $f(0)$ and $f(1)$ have different signs, which means there is a zero in the interval $(0,1)$.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
a , b = (0,1)
t = np.linspace(a,b)
plt.plot(t,f(t))
plt.plot([0,1],[0,0],'k')
for i in range(6):
    plt.plot([a,a],[0,f(a)],'r-o') # left endpoint
    plt.plot([b,b],[0,f(b)],'k-o') # right endpoint
plt.annotate("$a_%d$" % i, xy = (a,0),xytext = (a,0.07*(i+1)),fontsize=12)
plt.annotate("$b_%d$" % i, xy = (b,0),xytext = (b,-0.07*(i+1)),fontsize=12)
a,b = bisekcija(f,a,b,1)
plt.grid()
import disqus
%reload_ext disqus
%disqus matpy
"""
Explanation: Let us also illustrate the bisection graphically.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/production_ml/labs/tfdv_basic_spending.ipynb
|
apache-2.0
|
!pip install pyarrow==5.0.0
!pip install numpy==1.19.2
!pip install tensorflow-data-validation
"""
Explanation: Introduction to TensorFlow Data Validation
Learning Objectives
Review TFDV methods
Generate statistics
Visualize statistics
Infer a schema
Update a schema
Introduction
This lab is an introduction to TensorFlow Data Validation (TFDV), a key component of TensorFlow Extended. This lab serves as a foundation for understanding the features of TFDV and how it can help you understand, validate, and monitor your data.
TFDV can be used for generating schemas and statistics about the distribution of every feature in the dataset. Such information is useful for comparing multiple datasets (e.g. training vs inference datasets) and reporting:
Statistical differences in the feature distributions
TFDV also offers visualization capabilities for comparing datasets based on the Google PAIR Facets project.
Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the Solution Notebook for reference.
Import Libraries
End of explanation
"""
import pandas as pd
import tensorflow_data_validation as tfdv
import sys
import warnings
warnings.filterwarnings('ignore')
print('Installing TensorFlow Data Validation')
!pip install -q tensorflow_data_validation[visualization]
print('TFDV version: {}'.format(tfdv.version.__version__))
# Confirm that we're using Python 3
assert sys.version_info.major == 3, 'Oops, not running Python 3. Use Runtime > Change runtime type'
"""
Explanation: Restart the kernel (Kernel > Restart kernel > Restart).
Re-run the above cell and proceed further.
Note: Please ignore any incompatibility warnings and errors.
End of explanation
"""
# Read a comma-separated values (csv) file into DataFrame.
# TODO: Your code goes here
score_train.head()
# Read a comma-separated values (csv) file into DataFrame.
# TODO: Your code goes here
score_test.head()
score_train.info()
"""
Explanation: Load the Consumer Spending Dataset
We will download our dataset from Google Cloud Storage. The columns in the dataset are:
'Graduated': Whether or not the person is a college graduate
'Work Experience': The number of years in the workforce
'Family Size': The size of the family unit
'Spending Score': The spending score for consumer spending
End of explanation
"""
# check methods present in tfdv
# TODO: Your code goes here
"""
Explanation: Review the methods present in TFDV
End of explanation
"""
# Compute data statistics for the input pandas DataFrame.
# TODO: Your code goes here
"""
Explanation: Describing data with TFDV
The usual workflow when using TFDV during training is as follows:
Generate statistics for the data
Use those statistics to generate a schema for each feature
Visualize the schema and statistics and manually inspect them
Update the schema if needed
Compute and visualize statistics
First we'll use tfdv.generate_statistics_from_dataframe to compute statistics for our training data. (ignore the snappy warnings)
TFDV can compute descriptive statistics that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.
Internally, TFDV uses Apache Beam's data-parallel processing framework to scale the computation of statistics over large datasets. For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.
NOTE: Compute statistics
* tfdv.generate_statistics_from_csv
* tfdv.generate_statistics_from_dataframe
* tfdv.generate_statistics_from_tfrecord
Generate Statistics from a Pandas DataFrame
End of explanation
"""
# Visualize the input statistics using Facets.
# TODO: Your code goes here
"""
Explanation: Now let's use tfdv.visualize_statistics, which uses Facets to create a succinct visualization of our training data:
Notice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.
Notice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.
Notice whether any feature has no examples with values at all; such a feature is an opportunity for dimensionality reduction!
Try clicking "expand" above the charts to change the display
Try hovering over bars in the charts to display bucket ranges and counts
Try switching between the log and linear scales, and notice how the log scale reveals much more detail about the categorical features
Try selecting "quantiles" from the "Chart to show" menu, and hover over the markers to show the quantile percentages
End of explanation
"""
train_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_train)
test_stats = tfdv.generate_statistics_from_dataframe(dataframe=score_test)
tfdv.visualize_statistics(
lhs_statistics=train_stats, lhs_name='TRAIN_DATASET',
rhs_statistics=test_stats, rhs_name='NEW_DATASET')
"""
Explanation: TFDV generates different types of statistics based on the type of features.
For numerical features, TFDV computes for every feature:
* Count of records
* Number of missing (i.e. null values)
* Histogram of values
* Mean and standard deviation
* Minimum and maximum values
* Percentage of zero values
For categorical features, TFDV provides:
* Count of values
* Percentage of missing values
* Number of unique values
* Average string length
* Count for each label and its rank
Let's compare the score_train and the score_test datasets
End of explanation
"""
# Infers schema from the input statistics.
# TODO: Your code goes here
print(schema)
"""
Explanation: Infer a schema
Now let's use tfdv.infer_schema to create a schema for our data. A schema defines constraints for the data that are relevant for ML. Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics.
Getting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct.
Generating Schema
Once statistics are generated, the next step is to generate a schema for our dataset. This schema will map each feature in the dataset to a type (float, bytes, etc.). Also define feature boundaries (min, max, distribution of values and missings, etc.).
Link to infer schema
https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema
With TFDV, we generate a schema from the statistics using tfdv.infer_schema.
End of explanation
"""
tfdv.display_schema(schema=schema)
"""
Explanation: The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use tfdv.display_schema to display the inferred schema so that we can review it.
End of explanation
"""
# Make the Graduated, Profession and Family_Size features required (present in 100% of examples)
Graduated_feature = tfdv.get_feature(schema, 'Graduated')
Graduated_feature.presence.min_fraction = 1.0
Profession_feature = tfdv.get_feature(schema, 'Profession')
Profession_feature.presence.min_fraction = 1.0
Family_Size_feature = tfdv.get_feature(schema, 'Family_Size')
Family_Size_feature.presence.min_fraction = 1.0
tfdv.display_schema(schema)
"""
Explanation: TFDV provides an API to print a summary of each feature's schema using tfdv.display_schema.
In this visualization, the columns stand for:
Type indicates the feature datatype.
Presence indicates whether the feature must be present in 100% of examples (required) or not (optional).
Valency indicates the number of values required per training example.
Domain and Values indicates the feature domain and its values.
In the case of categorical features, single indicates that each training example must have exactly one category for the feature.
Updating the Schema
As stated above, Presence indicates whether the feature must be present in 100% of examples (required) or not (optional). Currently, all of our features except for our target label are shown as "optional". We need to make our features all required except for "Work Experience". We will need to update the schema.
TFDV lets you update the schema according to your domain knowledge of the data if you are not satisfied by the auto-generated schema. We will update three use cases: Making a feature required, adding a value to a feature, and change a feature from a float to an integer.
Change optional features to required.
End of explanation
"""
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.insert(0, 'Self-Employed')
Profession_domain.value
# [0 indicates I want 'Self-Employed' to come first; if the number were 3,
# it would be placed after the third value.]
"""
Explanation: Update a feature with a new value
Let's add "self-employed" to the Profession feature
End of explanation
"""
Profession_domain = tfdv.get_domain(schema, 'Profession')
Profession_domain.value.remove('Homemaker')
Profession_domain.value
"""
Explanation: Let's remove "Homemaker" from "Profession"
End of explanation
"""
# Update Family_Size to Int
size = tfdv.get_feature(schema, 'Family_Size')
size.type=2
tfdv.display_schema(schema)
"""
Explanation: Change a feature from a float to an integer
End of explanation
"""
|
bwgref/nustar_pysolar
|
notebooks/20200912/Planning 20200912.ipynb
|
mit
|
fname = io.download_occultation_times(outdir='../data/')
print(fname)
"""
Explanation: Download the list of occultation periods from the MOC at Berkeley.
Note that the occultation periods are typically only stored at Berkeley for the future and not for the past, so this is only really useful for observation planning.
End of explanation
"""
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
"""
Explanation: Download the NuSTAR TLE archive.
This contains every two-line element (TLE) that we've received for the whole mission. We'll expand on how to use this later.
The times, line1, and line2 elements are now the TLE elements for each epoch.
End of explanation
"""
tstart = '2020-09-12T08:30:00'
tend = '2020-09-13T01:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
orbits
# Get the solar angular radius
from sunpy.coordinates import sun
angular_size = sun.angular_radius(t='now')
dx = angular_size.arcsec
print(dx)
offset = [-dx, 0]*u.arcsec
for ind, orbit in enumerate(orbits):
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.3f} Dec (deg): {sky_pos[1]:8.3f}')
print("")
"""
Explanation: Here is where we define the observing window that we want to use.
Note that tstart and tend must be in the future otherwise you won't find any occultation times and sunlight_periods will return an error.
End of explanation
"""
from importlib import reload
reload(planning)
pa = planning.get_nustar_roll(tstart, 0)
print(tstart)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
# We're actually using a SKY PA of 340. So...we'll need to rotate
target_pa = 150
extra_roll = ( 150 - pa.value ) * u.deg
print(f'Extra roll used: {extra_roll}')
# Just use the first orbit...or choose one. This may download a ton of deltat.preds, which is a known
# bug to be fixed.
orbit = orbits[0].copy()
print(orbit)
#...adjust the index above to get the correct orbit. Then uncomment below.
planning.make_mosaic(orbit, make_regions=True, extra_roll = extra_roll, outfile='orbit0_mosaic.txt', write_output=True)
"""
Explanation: This is where you actually make the Mosaic for Orbit 0
End of explanation
"""
|
ellisonbg/talk-2014
|
Notebook Usage.ipynb
|
mit
|
from IPython.display import display, Image, HTML
from talktools import website, nbviewer
"""
Explanation: How are people using the Jupyter Notebook and IPython?
End of explanation
"""
website('http://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/')
"""
Explanation: Cam Davidson-Pilon's book
Cam Davidson-Pilon has written an entire book on Bayesian Statistics as a set of Jupyter+IPython Notebooks that are hosted on GitHub and viewed on https://nbviewer.jupyter.org.
End of explanation
"""
website('http://shop.oreilly.com/product/0636920030195.do')
"""
Explanation: Mining the Social Web
Matthew Russell has written an O'Reilly published book, Mining The Social Web, that includes Jupyter+IPython Notebooks for all examples.
End of explanation
"""
Image('images/twitter_post.png')
"""
Explanation: Python for Signal Processing
Jose Unpingco has written a series of blog posts on Signal Processing as Jupyter+IPython Notebooks. These blog posts were the basis of a full-length book, Python for Signal Processing, published by Springer in 2013.
<img src="images/p4sp.png">
Technical blogging
Jake Vanderplas at the University of Washington blogs using the IPython Notebook:
Pythonic Perambulations, Personal blog.
Visualizing 4 Dimensional Asteroids, Scientific American, Sept. 2014.
<img src="images/jakevdp_sa.png">
Social media
Many individuals are now using http://nbviewer.jupyter.org with Twitter to speak about a wide range of technical work.
End of explanation
"""
website('https://nbviewer.jupyter.org/url/norvig.com/ipython/xkcd1313.ipynb')
"""
Explanation: Peter Norvig
Peter Norvig (Director of Research at Google) has been using the Jupyter Notebook to explore interesting algorithmic problems. He began with a notebook inspired by a regular expression golf question from xkcd #1313:
Can I come up with an algorithm to find a short regex that matches the [presidential] winners and not the losers?
End of explanation
"""
website("https://nbviewer.jupyter.org/url/norvig.com/ipython/xkcd1313-part2.ipynb")
"""
Explanation: This notebook spawned a huge amount of interest in this problem. A number of individuals started to interact with Peter over email about the algorithmic questions involved. This led to a second notebook on the topic:
End of explanation
"""
Image('images/NASA-nbviewer.jpg')
"""
Explanation: The Notebook makes it easy to have detailed technical discussions involving code and data that are open and reproducible.
NASA/JPL
NASA/JPL is using the Jupyter Notebook on various teams and is running nbviewer internally to share Jupyter Notebooks:
End of explanation
"""
website("https://nbviewer.jupyter.org/github/brianckeegan/Bechdel/blob/master/Bechdel_test.ipynb")
"""
Explanation: Data-driven journalism
The Notebook is being used in reproducible, data-driven journalism.
538
Here is an example from 538.
On April 1, 2014, Walter Hickey of 538, published an article entitled The Dollars-and-Cents Case Against Hollywood's Exclusion of Women about the Bechdel test, which provides a simple way to model the prominence of women in movies.
On April 7, 2014, Brian Keegan, a post-doctoral research fellow in computational social science Northeastern University, published a blog post entitled The Need for Openness in Data Journalism. Along with the blog post, he published a replication of Walter Hickey's analysis using the Jupyter Notebook and nbviewer.
End of explanation
"""
website("https://nbviewer.jupyter.org/github/ellisonbg/data/blob/r-notebook/bechdel/analyze-bechdel.ipynb")
"""
Explanation: This caught the attention of Walter Hickey:
<img src="images/walt_hickey.png">
and led to a follow up article on 538 along with the posting of Walter Hickey's data and code on GitHub. We followed that up by creating an Jupyter+IR Notebook version of the original data analysis posted in the 538 GitHub repository:
End of explanation
"""
website("https://nbviewer.jupyter.org/github/buzzfeednews/2014-08-irs-scams/blob/master/notebooks/irs-scams.ipynb")
"""
Explanation: BuzzFeed
BuzzFeed has started to publish data and Jupyter Notebooks for their articles in their BuzzFeedNews repository on GitHub. Here are a few highlights:
On August 7, 2014, BuzzFeed published an article by John Templon, entitled Tax Collection Scams Skyrocket. Here is their notebook that replicates the analysis:
End of explanation
"""
website('http://nbviewer.ipython.org/github/BuzzFeedNews/2014-08-st-louis-county-segregation/blob/master/notebooks/segregation-analysis.ipynb')
"""
Explanation: On August 20, 2014, BuzzFeed published an article by Jeremy Singer-Vine, entitled The Ferguson Area Is Even More Segregated Than You Probably Guessed. Here is their notebook that replicates the analysis:
End of explanation
"""
website("http://www.nature.com/ng/journal/vaop/ncurrent/full/ng.3051.html")
"""
Explanation: Reproducible academic publications
Publications in traditional academic journals are being accompanied by notebooks that reproduce and expand upon the main computational results of the work. See this section of the Notebook Gallery for a full list. Here is one notable example from Nature Genetics:
End of explanation
"""
website("https://nbviewer.jupyter.org/github/theandygross/TCGA/tree/master/Analysis_Notebooks/")
"""
Explanation: The authors published a GitHub repository with notebooks that replicate the results:
End of explanation
"""
%%file courses.csv
"Course","University","Instructor"
"Python for Data Science","UC Berkeley","Josh Bloom"
"Introduction to Data Science","UC Berkeley","Michael Franklin"
"Working with Open Data","UC Berkeley","Raymond Yee"
"Introduction to Signal Processing","UC Berkeley","Miki Lustig"
"Data Science (CS 109)","Harvard University","Pfister and Blitzstein"
"Practical Data Science","NYU","Josh Attenberg"
"Scientific Computing (ASTR 599)","University of Washington","Jake Vanderplas"
"Computational Physics","Cal Poly","Jennifer Klay"
"Introduction to Programming","Alaskan High School","Eric Matthes"
"Aerodynamics-Hydrodynamics (MAE 6226)","George Washington University","Lorena Barba"
"HyperPython: hyperbolic conservation laws","KAUST","David Ketcheson"
"Quantitative Economics","NYU","Sargent and Stachurski"
"Practical Numerical Methods with Python","4 separate universities + MOOC","Barba, et al."
import pandas
df = pandas.read_csv('courses.csv'); df
"""
Explanation: Teaching
The Jupyter Notebook is being used for lecture materials and student work in a large number of university and high school courses on scientific computing and data science. Most of these courses are being developed publicly on GitHub. Here are a few of the ones we know about:
End of explanation
"""
|
tebeka/pythonwise
|
First-Contact-With-Data.ipynb
|
bsd-3-clause
|
# Command line
!ls -lh taxi.csv
# Python
from os import path
print('%.2f KB' % (path.getsize('taxi.csv')/(1<<10)))
print('%.2f MB' % (path.getsize('taxi.csv')/(1<<20)))
"""
Explanation: First Contact with Data
Every time I encounter a new data file, there are a few initial "looks" that I take at it. This helps me understand whether I can load the whole set into memory and what the fields are. Since I'm command-line oriented, I use Linux command line utilities to do that (they are easily accessible from IPython/Jupyter with !), but it's easily done with Python as well.
As an example, we'll use a subset of the NYC taxi dataset. The file is called taxi.csv.
File Size
End of explanation
"""
# Command line
!wc -l taxi.csv
# Python
with open('taxi.csv') as fp:
print(sum(1 for _ in fp))
"""
Explanation: Number of Lines
End of explanation
"""
# Command line
!head -1 taxi.csv | tr , \\n
!printf "%d fields" $(head -1 taxi.csv | tr , \\n | wc -l)
# Python
import csv
with open('taxi.csv') as fp:
fields = next(csv.reader(fp))
print('\n'.join(fields))
print('%d fields' % len(fields))
"""
Explanation: Header
End of explanation
"""
# Command line
!head -2 taxi.csv | tail -1 | tr , \\n
!printf "%d values" $(head -2 taxi.csv | tail -1 | tr , \\n | wc -l)
# Python
with open('taxi.csv') as fp:
fp.readline() # Skip header
values = next(csv.reader(fp))
print('\n'.join(values))
print('%d values' % len(values))
# Python (with field names)
from itertools import zip_longest
with open('taxi.csv') as fp:
reader = csv.reader(fp)
header = next(reader)
values = next(reader)
for col, val in zip_longest(header, values, fillvalue='???'):
print('%-20s: %s' % (col, val))
"""
Explanation: Sample Data
End of explanation
"""
import pandas as pd
import numpy as np
date_cols = ['lpep_pickup_datetime', 'Lpep_dropoff_datetime']
with open('taxi.csv') as fp:
header = next(csv.reader(fp))
df = pd.read_csv(fp, names=header, usecols=np.arange(len(header)), parse_dates=date_cols)
df.head()
"""
Explanation: In both methods (with field names or without) we see that there are some extra empty fields at the end of each data row.
Loading as DataFrame
After the initial look, we know we can load the whole dataset into memory and have a good idea of what to tell pandas when parsing it.
End of explanation
"""
|
dmnfarrell/mhcpredict
|
examples/advanced.ipynb
|
apache-2.0
|
import numpy as np
import pandas as pd
pd.set_option('display.width', 100)
pd.set_option('max_colwidth', 80)
%matplotlib inline
import matplotlib as mpl
import seaborn as sns
sns.set_context("notebook", font_scale=1.4)
from IPython.display import display, HTML
import epitopepredict as ep
from epitopepredict import base, sequtils, analysis
genbankfile = '../testing/zaire-ebolavirus.gb'
zaire = sequtils.genbank_to_dataframe(genbankfile, cds=True)
savepath = 'test_iedbmhc1'
Pi = ep.get_predictor('iedbmhc1')
alleles = ep.mhc1_supertypes
Pi.predictProteins(zaire,length=11,alleles=alleles,path=savepath,overwrite=False)
Pi.load(path=savepath)
"""
Explanation: epitopepredict advanced analyses
References:
Paul, S. et al., 2015. Development and validation of a broad scheme for prediction of HLA class II restricted T cell epitopes. Journal of Immunological Methods, 422, pp.28–34. Available at: http://dx.doi.org/10.1016/j.jim.2015.03.022.
Terry, F.E. et al., 2014. Time for T? Immunoinformatics addresses vaccine design for neglected tropical and emerging infectious diseases. Expert review of vaccines, 9, pp.1–15. Available at: http://www.ncbi.nlm.nih.gov/pubmed/25193104.
End of explanation
"""
#find clusters of binders in these results
pb = Pi.promiscuousBinders(cutoff=5,n=2)
cl = analysis.find_clusters(pb, dist=10, min_size=3, genome=zaire)
display(cl[:20])
"""
Explanation: Binder clustering for finding regions with possible epitopes
One application of immunoinformatics is to screen out likely candidate antigens from the genome for further study. To do this using epitope prediction requires filtering the many potential binders in each protein. There is no theoretical basis for doing this.
Epitope clustering has been previously observed to be an indicator of T cell epitope regions. The find_clusters function in the analysis module allows automatic cluster detection from a set of predicted binders from one or more proteins. It can be done for a whole genome.
The result is a table of sequence regions with the number of binders and density of epitope cluster.
End of explanation
"""
name = 'ZEBOVgp6'
#ep.plotting.plot_multiple([Pi], name, regions=cl, n=2)
"""
Explanation: plot regions on sequence
We can use this to see where our clusters are in the sequence, for example:
End of explanation
"""
Pn = ep.get_predictor('netmhciipan')
savepath2='test_netmhciipan'
alleles2 = ep.mhc2_supertypes
Pn.predictProteins(zaire,length=11,alleles=alleles2,path=savepath2,overwrite=False)
Pn.load(path=savepath2)
cl = analysis.get_overlaps(cl,Pn.promiscuousBinders(n=2,cutoff=5),label='mhc2_ovlp')
#plot both sets of binders and overlay region of cluster in previous data
ax = ep.plot_tracks([Pi,Pn],name=name,legend=True,figsize=(14,4),n=2)
r = cl[cl.name==name]
print(r)
coords = (list(r.start),list(r.end-r.start))
coords = zip(*coords)
ep.plot_regions(coords, ax, color='gray')
"""
Explanation: overlaps
End of explanation
"""
from importlib import reload
reload(ep.base)
reload(analysis)
reload(sequtils)
name = 'ZEBOVgp2'
proteinseq = zaire[zaire.locus_tag==name].translation.iloc[0]
#print proteinseq
#print pb
seqs = pb.peptide
#provide a list of seqs (e.g. strains)
filename='ebola_VP35.fa'
r = sequtils.fetch_protein_sequences('Filovirus[Orgn] AND VP35[Gene]', filename=filename)
#align fasta sequences
aln = sequtils.muscle_alignment(filename)
#sequtils.showAlignment(aln)
alnrows = sequtils.alignment_to_dataframe(aln)
#print (sequtils.formatAlignment(aln))
#print alnrows[:25][['accession','definition','perc_ident']]
c = analysis.epitope_conservation(seqs, alnrows=alnrows)
"""
Explanation: conservation analysis
Conservation of epitopes may be important across strains or species.
End of explanation
"""
#localdb must be present in your file system, in the case it's in a folder called db
localdb = '../db/viral_refseq'
blr = analysis.get_orthologs(proteinseq, db=localdb)
alnrows, aln = analysis.align_blast_results(blr)
alnrows.to_csv('%s_aligned.csv' %name)
print(alnrows)
#now we run the same analysis using the set of refseq orthologs
c = analysis.epitope_conservation(seqs, alnrows=alnrows)
"""
Explanation: use a local blast database to find orthologs and conservation of your epitopes
Blast the protein sequence locally to get an alignment with any orthologs, then perform the analysis as above. In this case we have created a blast database of viral refseq proteins locally. Online blast is generally too slow for such an analysis, especially if we want to examine epitopes in many proteins.
End of explanation
"""
|
datactive/bigbang
|
examples/git-analysis/Git Interaction Graph.ipynb
|
mit
|
%matplotlib inline
from bigbang.ingress.git_repo import GitRepo;
from bigbang.analysis import repo_loader;
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
repos = repo_loader.get_org_repos("codeforamerica")
repo = repo_loader.get_multi_repo(repos=repos)
full_info = repo.commit_data;
"""
Explanation: This notebook creates a graph representation of the collaboration between contributors of a Git repository, where nodes are authors, and edges are weighted by the parent/child dependencies between the commits of authors.
End of explanation
"""
class Commit:
def __init__(self, message, hexsha, parents):
self.message = message
self.hexsha = hexsha
self.parents = parents
def __repr__(self):
return ' '.join(self.message.split(' ')[:4])
class Author:
def __init__(self, name, commits):
self.name = name
self.commits = commits
self.number_of_commits = 1
def add_commit(self, commit):
self.commits.append(commit)
self.number_of_commits += 1
def __repr__(self):
return self.name
"""
Explanation: Nodes will be Author objects, each of which holds a list of Commit objects.
End of explanation
"""
def get_authors():
authors = []
names = []
for index, row in full_info.iterrows():
name = row["Committer Name"]
hexsha = row["HEXSHA"]
parents = row["Parent Commit"]
message = row["Commit Message"]
if name not in names:
authors.append(Author(name, [Commit(message, hexsha, parents)]))
names.append(name)
else:
for author in authors:
if author.name == name:
author.add_commit(Commit(message, hexsha, parents))
return authors
"""
Explanation: We create a list of authors, also separately keeping track of committer names to make sure we only add each author once. If a commit by an already stored author is found, we add it to that author's list of commits.
End of explanation
"""
def make_graph(nodes):
G = nx.Graph()
for author in nodes:
for commit in author.commits:
for other in nodes:
for other_commit in other.commits:
if commit.hexsha in other_commit.parents:
if G.has_edge(author, other):
G[author][other]['weight'] += 1
else:
G.add_edge(author, other, weight = 1)
return G
nodes = get_authors()
G = make_graph(nodes)
pos = nx.spring_layout(G, iterations=100)
nx.draw(G, pos, font_size=8, with_labels = False)
# nx.draw_networkx_labels(G, pos);
"""
Explanation: We create our graph by forming an edge whenever an author has a commit which is the parent of another author's commit, and only increasing the weight of that edge if an edge between those two authors already exists.
End of explanation
"""
|
SchwaZhao/networkproject1
|
03_Introduction_To_Supervised_Machine_Learning.ipynb
|
mit
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(-10,10)
y = 1/(1+np.exp(-x))
p = plt.plot(x,y)
plt.grid(True)
"""
Explanation: In this section we will see the basics of supervised machine learning with a logistic regression classifier. We will see a simple example and see how to evaluate the performance of a binary classifier and avoid over-fitting.
Supervised machine learning
This section is partially inspired by the following Reference: http://cs229.stanford.edu/notes/cs229-notes1.pdf
Supervised learning consists of inferring a function from a labeled training set. On the other hand, unsupervised learning is a machine learning technique used when the input data is not labeled. Clustering is an example of unsupervised learning.
For supervised learning, we define:
The features (input variables) $x^{(i)}\in \mathbb{X}$
The target (output we are trying to predict) $y^{(i)} \in \mathbb{Y}$
A pair $(x^{(i)},y^{(i)})$ is a training example.
The set $\{(x^{(i)},y^{(i)});\ i = 1,\ldots,m\}$ is the training set:
The goal of supervised learning is to learn a function $h: \mathbb{X}\mapsto\mathbb{Y}$, called the hypothesis, so that $h(x)$ is a good
predictor of the corresponding $y$.
Regression corresponds to the case where $y$ is a continuous variable
Classification corresponds to the case where $y$ can only take a small number of discrete values
Examples:
- Univariate Linear Regression: $h_w(x) = w_0+w_1x$, with $\mathbb{X} = \mathbb{Y} = \mathbb{R}$
- Multivariate Linear Regression: $$h_w(x) = w_0+w_1x_1 + ... + w_nx_n = \sum_{i=0}^{n}w_ix_i = w^Tx,$$
with $\mathbb{Y} = \mathbb{R}$ and $\mathbb{X} = \mathbb{R^n}$.
Here $w_0$ is the intercept with the convention that $x_0=1$ to simplify notation.
Binary Classification with Logistic Regression
$y$ can take only two values, 0 or 1. For example, if $y$ is the sentiment associated with the tweet,
$y=1$ if the tweet is "positive" and $y=0$ if the tweet is "negative".
$x^{(i)}$ represents the features of a tweet. For example the presence or absence of certain words.
$y^{(i)}$ is the label of the training example represented by $x^{(i)}$.
Since $y\in\{0,1\}$, we want to limit $h_w(x)$ to the interval $[0,1]$.
The Logistic regression consists of choosing $h_w(x)$ as
$$
h_w(x) = \frac{1}{1+e^{-w^Tx}}
$$
where $w^Tx = \sum_{i=0}^{n}w_ix_i$ and $h_w(x) = g(w^Tx)$ with
$$
g(x)=\frac{1}{1+e^{-x}}.
$$
$g(x)$ is the logistic function or sigmoid function
End of explanation
"""
# Simple example:
# we have 20 students that took an exam and we want to know if we can use
# the number of hours they studied to predict if they pass or fail the
# exam
# m = 20 training samples
# n = 1 feature (number of hours)
X = np.array([0.50, 0.75, 1.00, 1.25, 1.50, 1.75, 1.75, 2.00, 2.25, 2.50,
2.75, 3.00, 3.25, 3.50, 4.00, 4.25, 4.50, 4.75, 5.00, 5.50])
# 1 = pass, 0 = fail
y = np.array([0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1])
print(X.shape)
print(y.shape)
p = plt.plot(X,y,'o')
tx = plt.xlabel('x [h]')
ty = plt.ylabel('y ')
"""
Explanation: $g(x)\rightarrow 1$ for $x\rightarrow\infty$
$g(x)\rightarrow 0$ for $x\rightarrow -\infty$
$g(0) = 1/2$
Finally, to go from the regression to the classification, we can simply apply the following condition:
$$
y=\left\{
\begin{array}{@{}ll@{}}
1, & \text{if}\ h_w(x)\geq 1/2 \\
0, & \text{otherwise}
\end{array}\right.
$$
Let's clarify the notation. We have $m$ training samples and $n$ features; our training examples can be represented by an $m$-by-$n$ matrix $\underline{\underline{X}}=(x_{ij})$ ($m$-by-$(n+1)$ if we include the intercept term) that contains the training examples, $x^{(i)}$, in its rows.
The target values of the training set can be represented as an $m$-dimensional vector $\underline{y}$ and the parameters
of our model as
an $n$-dimensional vector $\underline{w}$ ($n+1$ if we take into account the intercept).
Now, for a given training example $x^{(i)}$, the function that we want to learn (or fit) can be written:
$$
h_\underline{w}(x^{(i)}) = \frac{1}{1+e^{-\sum_{j=0}^n w_j x_{ij}}}
$$
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
fx = np.linspace(-5,5)
Ly1 = np.log2(1+np.exp(-fx))
Ly0 = np.log2(1+np.exp(-fx)) - np.log2(np.exp(-fx))
p = plt.plot(fx,Ly1,label='L(1,f(x))')
p = plt.plot(fx,Ly0,label='L(0,f(x))')
plt.xlabel('f(x)')
plt.ylabel('L')
plt.legend()
# coming back to our simple example
def Loss(x_i,y_i, w0, w1):
fx = w0 + x_i*w1
if y_i == 1:
return np.log2(1+np.exp(-fx))
if y_i == 0:
return np.log2(1+np.exp(-fx)) - np.log2(np.exp(-fx))
else:
raise Exception('y_i must be 0 or 1')
def sumLoss(x,y, w0, w1):
sumloss = 0
for x_i, y_i in zip(x,y):
sumloss += Loss(x_i,y_i, w0, w1)
return sumloss
# lets compute the loss function for several values
w0s = np.linspace(-10,20,100)
w1s = np.linspace(-10,20,100)
sumLoss_vals = np.zeros((w0s.size, w1s.size))
for k, w0 in enumerate(w0s):
for l, w1 in enumerate(w1s):
sumLoss_vals[k,l] = sumLoss(X,y,w0,w1)
# let's find the values of w0 and w1 that minimize the loss
ind0, ind1 = np.where(sumLoss_vals == sumLoss_vals.min())
print((ind0,ind1))
print((w0s[ind0], w1s[ind1]))
# plot the loss function
p = plt.pcolor(w1s, w0s, sumLoss_vals)  # x-axis: w1, y-axis: w0 (consistent with the labels below)
c = plt.colorbar()
p2 = plt.plot(w1s[ind1], w0s[ind0], 'ro')
tx = plt.xlabel('w1')
ty = plt.ylabel('w0')
"""
Explanation: Likelihood of the model
How to find the parameters, also called weights, $\underline{w}$ that best fit our training data?
We want to find the weights $\underline{w}$ that maximize the likelihood of observing the target $\underline{y}$ given the observed features $\underline{\underline{X}}$.
We need a probabilistic model that gives us the probability of observing the value $y^{(i)}$ given the features $x^{(i)}$.
The function $h_\underline{w}(x^{(i)})$ can be used precisely for that:
$$
P(y^{(i)}=1|x^{(i)};\underline{w}) = h_\underline{w}(x^{(i)})
$$
$$
P(y^{(i)}=0|x^{(i)};\underline{w}) = 1 - h_\underline{w}(x^{(i)})
$$
We can write this more compactly as:
$$
P(y^{(i)}|x^{(i)};\underline{w}) = (h_\underline{w}(x^{(i)}))^{y^{(i)}} ( 1 - h_\underline{w}(x^{(i)}))^{1-y^{(i)}}
$$
where $y^{(i)}\in\{0,1\}$.
We see that $y^{(i)}$ is a random variable following a Bernoulli distribution with expectation $h_\underline{w}(x^{(i)})$.
The Likelihood function of a statistical model is defined as:
$$
\mathcal{L}(\underline{w}) = \mathcal{L}(\underline{w};\underline{\underline{X}},\underline{y}) = P(\underline{y}|\underline{\underline{X}};\underline{w}).
$$
The likelihood takes into account all the $m$ training samples of our training dataset and estimates the likelihood
of observing $\underline{y}$ given $\underline{\underline{X}}$ and $\underline{w}$. Assuming that the $m$ training examples were generated independently, we can write:
$$
\mathcal{L}(\underline{w}) = P(\underline{y}|\underline{\underline{X}};\underline{w}) = \prod_{i=1}^m P(y^{(i)}|x^{(i)};\underline{w}) = \prod_{i=1}^m (h_\underline{w}(x^{(i)}))^{y^{(i)}} ( 1 - h_\underline{w}(x^{(i)}))^{1-y^{(i)}}.
$$
This is the function that we want to maximize. It is usually much simpler to maximize the logarithm of this function, which is equivalent.
$$
l(\underline{w}) = \log\mathcal{L}(\underline{w}) = \sum_{i=1}^{m} \left(y^{(i)} \log h_\underline{w}(x^{(i)}) + (1- y^{(i)})\log\left(1- h_\underline{w}(x^{(i)})\right) \right)
$$
Loss function and linear models
Another way of formulating this problem is to define a loss function $L\left(y^{(i)}, f(x^{(i)})\right)$ such that:
$$
\sum_{i=1}^{m} L\left(y^{(i)}, f(x^{(i)})\right) = - l(\underline{w}).
$$
And now the problem consists of minimizing $\sum_{i=1}^{m} L\left(y^{(i)}, f(x^{(i)})\right)$ over all the possible values of $\underline{w}$.
Using the definition of $h_\underline{w}(x^{(i)})$ you can show that $L$ can be written as:
$$
L\left(y^{(i)}=1, f(x^{(i)})\right) = \log_2\left(1+e^{-f(x^{(i)})}\right)
$$
and
$$
L\left(y^{(i)}=0, f(x^{(i)})\right) = \log_2\left(1+e^{-f(x^{(i)})}\right) - \log_2\left(e^{-f(x^{(i)})}\right)
$$
where $f(x^{(i)}) = \sum_{j=0}^n w_j x_{ij}$ is called the decision function.
End of explanation
"""
# plot the solution
x = np.linspace(0,6,100)
def h_w(x, w0=w0s[ind0], w1=w1s[ind1]):
return 1/(1+np.exp(-(w0+x*w1)))
p1 = plt.plot(x, h_w(x))
p2 = plt.plot(X,y,'ro')
tx = plt.xlabel('x [h]')
ty = plt.ylabel('y ')
# probability of passing the exam if you worked 5 hours:
print(h_w(5))
"""
Explanation: Here we found the minimum of the loss function simply by computing it over a large range of values. In practice, this approach is not possible when the dimensionality of the loss function (number of weights) is very large. To find the minimum of the loss function, the gradient descent algorithm (or stochastic gradient descent) is often used.
End of explanation
"""
# The same thing using the sklearn module
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=1e10)
# to train our model we use the "fit" method
# we have to reshape X because we have only one feature here
model.fit(X.reshape(-1,1),y)
# to see the weights
print(model.coef_)
print(model.intercept_)
# use the trained model to predict new values
# scikit-learn expects a 2D input of shape (n_samples, n_features)
print(model.predict_proba([[5]]))
print(model.predict([[5]]))
"""
Explanation: We will use the package scikit-learn (http://scikit-learn.org/), which provides access to many tools for machine learning, data mining and data analysis.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
fx = np.linspace(-5,5, 200)
Logit = np.log2(1+np.exp(-fx))
Percep = np.maximum(0,- fx)
Hinge = np.maximum(0, 1- fx)
ZeroOne = np.ones(fx.size)
ZeroOne[fx>=0] = 0
p = plt.plot(fx,Logit,label='Logistic Regression')
p = plt.plot(fx,Percep,label='Perceptron')
p = plt.plot(fx,Hinge,label='Hinge-loss')
p = plt.plot(fx,ZeroOne,label='Zero-One loss')
plt.xlabel('f(x)')
plt.ylabel('L')
plt.legend()
ylims = plt.ylim((0,7))
"""
Explanation: Note that although the loss function is not linear, the decision function is a linear function of the weights and features. This is why the Logistic regression is called a linear model.
Other linear models are defined by different loss functions. For example:
- Perceptron: $L \left(y^{(i)}, f(x^{(i)})\right) = \max(0, -y^{(i)}\cdot f(x^{(i)}))$
- Hinge-loss (soft-margin Support vector machine (SVM) classification): $L \left(y^{(i)}, f(x^{(i)})\right) = \max(0, 1-y^{(i)}\cdot f(x^{(i)}))$
See http://scikit-learn.org/stable/modules/sgd.html for more examples.
End of explanation
"""
# for example
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
# logistic regression with L2 regularization, C controls the strength of the regularization
# C = 1/lambda
model = LogisticRegression(C=1, penalty='l2')
# cross validation using 10 folds
y_pred = cross_val_predict(model, X.reshape(-1,1), y=y, cv=10)
print(confusion_matrix(y,y_pred))
print('Accuracy = ' + str(accuracy_score(y, y_pred)))
print('Precision = ' + str(precision_score(y, y_pred)))
print('Recall = ' + str(recall_score(y, y_pred)))
print('F_1 = ' + str(f1_score(y, y_pred)))
# try to run it with different number of folds for the cross-validation
# and different values of the regularization strength
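# As a small sanity check (a sketch, not part of the original exercise), recompute
# precision and recall by hand from the confusion matrix entries, to confirm how
# TP, FP and FN are read off the matrix.
tn, fp, fn, tp = confusion_matrix(y, y_pred).ravel()
print('Precision (by hand) = ' + str(float(tp)/(tp + fp)))
print('Recall (by hand) = ' + str(float(tp)/(tp + fn)))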
"""
Explanation: Evaluating the performance of a binary classifier
The confusion matrix allows us to visualize the performance of a classifier:
| | predicted positive | predicted negative |
| --- |:---:|:---:|
| real positive | TP | FN |
| real negative | FP | TN |
For each prediction $y_p$, we put it in one of the four categories based on the true value of $y$:
- TP = True Positive
- FP = False Positive
- TN = True Negative
- FN = False Negative
We can then evaluate several measures, for example:
Accuracy:
$\text{Accuracy}=\frac{TP+TN}{TP+TN+FP+FN}$
Accuracy is the proportion of true results (both true positives and true negatives) among the total number of cases examined. However, accuracy is not necessarily a good measure of the predictive power of a model. See the example below:
Accuracy paradox:
A classifier with these results:
| |Predicted Negative | Predicted Positive|
| --- |---|---|
|Negative Cases |9,700 | 150|
|Positive Cases |50 |100|
has an accuracy = 98%.
Now consider the results of a classifier that systematically predict a negative result independently of the input:
| |Predicted Negative| Predicted Positive|
|---|---|---|
|Negative Cases| 9,850 | 0|
|Positive Cases| 150 |0 |
The accuracy of this classifier is 98.5%, even though it is clearly useless. Here the less accurate model is more useful than the more accurate one. This is why accuracy should not be used (alone) to evaluate the performance of a classifier.
Precision and Recall are usually prefered:
Precision:
$\text{Precision}=\frac{TP}{TP+FP}$
Precision measures the fraction of positive predictions that are correct, i.e. the lack of false positives.
It answers the question: "Given a positive prediction from the classifier, how likely is it to be correct?"
Recall:
$\text{Recall}=\frac{TP}{TP+FN}$
Recall measures the proportion of positives that are correctly identified as such, i.e. the lack of false negatives.
It answers the question: "Given a positive example, will the classifier detect it?"
$F_1$ score:
In order to account for the precision and recall of a classifier, $F_1$ score is the harmonic mean of both measures:
$F_1 = 2 \cdot \frac{\mathrm{precision} \cdot \mathrm{recall}}{ \mathrm{precision} + \mathrm{recall}} = 2 \frac{TP}{2TP +FP+FN}$
When evaluating the performance of a classifier it is important to test it on a different set of values than the set used to train it. Indeed, we want to know how the classifier performs on new data, not on the training data. For this purpose we separate the training set in two: a part that we use to train the model and a part that we use to test it. This method is called cross-validation. Usually, we split the training set in N parts (typically 3 or 10), train the model on N-1 parts and test it on the remaining part. We then repeat this procedure with all the combinations of training and testing parts and average the performance metrics from each test. Scikit-learn makes it easy to perform cross-validation: http://scikit-learn.org/stable/modules/cross_validation.html
Regularization and over-fitting
Overfitting happens when your model is too complicated to generalise to new data. When your model fits your data perfectly, it is unlikely to fit new data well.
<img src="https://upload.wikimedia.org/wikipedia/commons/1/19/Overfitting.svg" style="width: 250px;"/>
The model in green is over-fitted. It performs very well on the training set, but it does not generalize well to new data compared to the model in black.
To avoid over-fitting, it is important to have a large training set and to use cross-validation to evaluate the performance of a model. Additionally, regularization is used to make the model less "complex" and more general.
Regularization consists in adding a term $R(\underline{w})$, that penalizes too "complex" models, to the loss function, so that the training error that we want to minimize is:
$E(\underline{w}) = \sum_{i=1}^{m} L\left(y^{(i)}, f(x^{(i)})\right) + \lambda R(\underline{w})$,
where $\lambda$ is a parameter that controls the strength of the regularization.
Usual choices for $R(\underline{w})$ are:
- L2 norm of the weights: $R(\underline{w}) := \frac{1}{2} \sum_{j=1}^{n} w_j^2$, which forces small weights in the solution,
- L1 norm of the weights: $R(\underline{w}) := \sum_{j=1}^{n} |w_j|$ (also referred to as Lasso), which leads to sparse solutions (with several zero weights).
The choice of the regularization and of its strength is usually made by selecting the best combination during cross-validation.
End of explanation
"""
|
fastai/fastai
|
dev_nbs/explorations/tokenizing.ipynb
|
apache-2.0
|
# this exploration assumes the usual fastai v2 utilities are in scope, e.g.
# from fastai.text.all import *   (untar_data, URLs, L, parallel, noop, chunked, ...)
path = untar_data(URLs.IMDB_SAMPLE)
df = pd.read_csv(path/'texts.csv')
df.head(2)
ss = L(list(df.text))
ss[0]
"""
Explanation: Let's look at how long it takes to tokenize a sample of 1000 IMDB reviews.
End of explanation
"""
def delim_tok(s, delim=' '): return L(s.split(delim))
s = ss[0]
delim_tok(s)
"""
Explanation: We'll start with the simplest approach:
End of explanation
"""
def apply(func, items): return list(map(func, items))
"""
Explanation: ...and a general way to tokenize a bunch of strings:
End of explanation
"""
%%timeit -n 2 -r 3
global t
t = apply(delim_tok, ss)
"""
Explanation: Let's time it:
End of explanation
"""
%%timeit -n 2 -r 3
parallel(delim_tok, ss, n_workers=2, progress=False)
"""
Explanation: ...and the same thing with 2 workers:
End of explanation
"""
batches32 = [L(list(o)).map(str) for o in np.array_split(ss, 32)]
batches8 = [L(list(o)).map(str) for o in np.array_split(ss, 8 )]
batches = [L(list(o)).map(str) for o in np.array_split(ss, 2 )]
%%timeit -n 2 -r 3
parallel(partial(apply, delim_tok), batches, progress=False, n_workers=2)
"""
Explanation: How about if we put half the work in each worker?
End of explanation
"""
%%timeit -n 2 -r 3
global t
t = parallel(noop, batches, progress=False, n_workers=2)
"""
Explanation: So there's a lot of overhead in using parallel processing in Python. :(
Let's see why. What if we do nothing interesting in our function?
End of explanation
"""
def f(x): return 1
%%timeit -n 2 -r 3
global t
t = parallel(f, batches, progress=False, n_workers=2)
"""
Explanation: That's quite fast! (Although still slower than single process.)
What if we don't return much data?
End of explanation
"""
def f(items):
    o = [s.split(' ') for s in items]  # create the token lists...
    return [s for s in items]          # ...but return the original strings instead
"""
Explanation: That's a bit faster still.
What if we don't actually return the lists of tokens, but create them still?
End of explanation
"""
%%timeit -n 2 -r 3
global t
t = parallel(f, batches, progress=False, n_workers=2)
"""
Explanation: So creating the tokens isn't taking the time, but returning them over the process boundary is.
End of explanation
"""
sarr = np.array(ss)
%%timeit -n 2 -r 3
global t
t = np.char.split(sarr)
"""
Explanation: Is numpy any faster?
End of explanation
"""
from spacy.lang.en import English
def conv_sp(doc): return L(doc).map(str)
class SpTok:
    def __init__(self):
        nlp = English()
        self.tok = nlp.Defaults.create_tokenizer(nlp)
    def __call__(self, x): return conv_sp(self.tok(str(x)))
"""
Explanation: Spacy
End of explanation
"""
%%timeit -n 2 -r 3
SpTok()
nlp = English()
sp_tokenizer = nlp.Defaults.create_tokenizer(nlp)
def spacy_tok(s): return L(sp_tokenizer(str(s))).map(str)
"""
Explanation: Let's see how long it takes to create a tokenizer in Spacy:
End of explanation
"""
%%timeit -r 3
global t
t = apply(spacy_tok, ss)
"""
Explanation: Time tokenization in Spacy using a loop:
End of explanation
"""
%%timeit -r 3
global t
t = parallel(partial(apply, spacy_tok), batches, progress=False, n_workers=2)
"""
Explanation: ...and the same thing in parallel:
End of explanation
"""
%%timeit -r 3
global t
t = parallel(partial(apply, spacy_tok), batches8, progress=False, n_workers=8)
"""
Explanation: ...and with more workers:
End of explanation
"""
def f(its):
tok = SpTok()
return [[str(o) for o in tok(p)] for p in its]
%%timeit -r 3
global t
t = parallel(f, batches8, progress=False, n_workers=8)
"""
Explanation: ...and with creating the tokenizer in the child process:
End of explanation
"""
%%timeit -r 3
global t
t = L(nlp.tokenizer.pipe(ss)).map(conv_sp)
def f(its): return L(nlp.tokenizer.pipe(its)).map(conv_sp)
%%timeit -r 3
global t
t = parallel(f, batches8, progress=False, n_workers=8)
test_eq(chunked(range(12),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
test_eq(chunked(range(11),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]])
test_eq(chunked(range(10),n_chunks=4), [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]])
test_eq(chunked(range( 9),n_chunks=3), [[0, 1, 2], [3, 4, 5], [6, 7, 8]])
%%timeit -r 3
global t
t = parallel_chunks(f, ss, n_workers=8, progress=False)
def array_split(arr, n): return chunked(arr, math.floor(len(arr)/n))
"""
Explanation: Let's try pipe
End of explanation
"""
|
OceanPARCELS/parcels
|
parcels/examples/tutorial_diffusion.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import math
from datetime import timedelta
from parcels import ParcelsRandom
from parcels import (FieldSet, Field, ParticleSet, JITParticle, AdvectionRK4, ErrorCode,
DiffusionUniformKh, AdvectionDiffusionM1, AdvectionDiffusionEM)
from parcels import plotTrajectoriesFile
K_bar = 0.5 # Average diffusivity
alpha = 1. # Profile steepness
L = 1. # Basin scale
Ny = 103 # Number of grid cells in y_direction (101 +2, one level above and one below, where fields are set to zero)
dy = 1.03/Ny # Spatial resolution
y = np.linspace(-0.01, 1.01, 103) # y-coordinates for grid
y_K = np.linspace(0., 1., 101) # y-coordinates used for setting diffusivity
beta = np.zeros(y_K.shape) # Placeholder for fraction term in K(y) formula
for yi in range(len(y_K)):
if y_K[yi] < L/2:
beta[yi] = y_K[yi]*np.power(L - 2*y_K[yi], 1/alpha)
elif y_K[yi] >= L/2:
beta[yi] = (L - y_K[yi])*np.power(2*y_K[yi] - L, 1/alpha)
Kh_meridional = 0.1*(2*(1+alpha)*(1+2*alpha))/(alpha**2*np.power(L, 1+1/alpha))*beta
Kh_meridional = np.concatenate((np.array([0]), Kh_meridional, np.array([0])))
plt.plot(Kh_meridional, y)
plt.ylabel("y")
plt.xlabel(r"$K_{meridional}$")
plt.show()
"""
Explanation: Tutorial: advection-diffusion kernels in Parcels
In Eulerian ocean models, sub-grid scale dispersion of tracers such as heat, salt, or nutrients is often parameterized as a diffusive process. In Lagrangian particle simulations, sub-grid scale effects can be parameterized as a stochastic process, randomly displacing a particle position in proportion to the local eddy diffusivity (Van Sebille et al. 2018). Parameterizing sub-grid scale dispersion may be especially important when coarse velocity fields are used that do not resolve mesoscale eddies (Shah et al., 2017). This tutorial explains how to use a sub-grid scale parameterization in Parcels that is consistent with the advection-diffusion equation used in Eulerian models.
Stochastic differential equations (SDE) consistent with advection-diffusion
The time-evolution of a stochastic process is described by a stochastic differential equation. The time-evolution of the conditional probability density of a stochastic process is described by a Fokker-Planck equation (FPE). The advection-diffusion equation, describing the evolution of a tracer, can be written as a Fokker-Planck equation. Therefore, we can formulate a stochastic differential equation for a particle in the Lagrangian frame undergoing advection with stochastic noise proportional to the local diffusivity in a way that is consistent with advection-diffusion in the Eulerian frame. For details, see Shah et al., 2011 and van Sebille et al., 2018.
The stochastic differential equation for a particle trajectory including diffusion is
$$
\begin{aligned}
d\mathbf{X}(t) &\overset{\text{Itô}}{=} (\mathbf{u} + \nabla \cdot \mathbf{K}) dt + \mathbf{V}(t, \mathbf{X})\cdot d\mathbf{W}(t), \\
\mathbf{X}(t_0) &= \mathbf{x}_0,
\end{aligned}
$$
where $\mathbf{X}$ is the particle position vector ($\mathbf{x}_0$ being the initial position vector), $\mathbf{u}$ the velocity vector, $\mathbf{K} = \frac{1}{2} \mathbf{V} \cdot \mathbf{V}^T$ the diffusivity tensor, and $d\mathbf{W}(t)$ a Wiener increment (normally distributed with zero mean and variance $dt$). Particle distributions obtained by solving the above equation are therefore consistent with Eulerian concentrations found by solving the advection-diffusion equation.
In three-dimensional ocean models diffusion operates along slopes of neutral buoyancy. To account for these slopes, the 3D diffusivity tensor $\mathbf{K}$ (and therefore $\mathbf{V}$) contains off-diagonal components. Three-dimensional advection-diffusion is not yet implemented in Parcels, but it is currently under development. Here we instead focus on the simpler case of diffusion in a horizontal plane, where diffusivity is specified only in the zonal and meridional direction, i.e.
$$\mathbf{K}(x,y)=\begin{bmatrix}
K_x(x,y) & 0\\
0 & K_y(x,y)
\end{bmatrix}.$$
The above stochastic differential equation then becomes
$$
\begin{align}
dX(t) &= a_x dt + b_x dW_x(t), \quad &X(t_0) = x_0,\\
dY(t) &= a_y dt + b_y dW_y(t), \quad &Y(t_0) = y_0,
\end{align}
$$
where $a_i = v_i + \partial_i K_i(x, y)$ is the deterministic drift term and $b_i = \sqrt{2K_i(x, y)}$ a stochastic noise term ($\partial_i$ denotes the partial derivative with respect to $i$).
Numerical Approximations of SDEs
The simplest numerical approximation of the above SDEs is obtained by replacing $dt$ by a finite time discrete step $\Delta t$ and $dW$ by a discrete increment $\Delta W$, yielding the Euler-Maruyama (EM) scheme (Maruyama, 1955):
$$
\begin{equation}
X_{n+1} = X_n + a_x \Delta t + b_x \Delta W_{n, x},
\end{equation}
$$
with a similar expression for $Y$.
A higher-order scheme is found by including extra terms from a Taylor expansion on our SDE, yielding the Milstein scheme of order 1 (M1):
$$
\begin{equation}
X_{n+1} = X_n + a_x \Delta t + b_x \Delta W_{n, x} + \frac{1}{2}b_x \partial_x b_x(\Delta W_{n, x}^2 - \Delta t),
\end{equation}
$$
which can be rewritten by explicitly writing $b_x\partial_x b_x$ as $\partial_x K_x(z)$:
$$
\begin{equation}
X_{n+1} = X_n + v_x \Delta t + \frac{1}{2}\partial_x K_x(\Delta W_{n, x}^2 + \Delta t) + b_x\Delta W_{n, x}.
\end{equation}
$$
The extra term in the M1 scheme provides extra accuracy at negligible computational cost.
The spatial derivatives in the EM and M1 schemes can be approximated by a central difference. Higher order numerical schemes (see Gräwe et al., 2012) include higher order derivatives. Since Parcels uses bilinear interpolation, these higher order derivatives cannot be computed, meaning that higher order numerical schemes cannot be used.
An overview of numerical approximations for SDEs in a particle tracking setting can be found in Gräwe (2011).
Using Advection-Diffusion Kernels in Parcels
The EM and M1 advection-diffusion approximations are available as AdvectionDiffusionEM and AdvectionDiffusionM1, respectively. The AdvectionDiffusionM1 kernel should be the default choice, as the increased accuracy comes at negligible computational cost.
The advection component of these kernels is similar to that of the Explicit Euler advection kernel (AdvectionEE). In the special case where diffusivity is constant over the entire domain, the diffusion-only kernel DiffusionUniformKh can be used in combination with an advection kernel of choice. Since the diffusivity here is space-independent, gradients are not calculated, increasing efficiency. The diffusion-step can in this case be computed after or before advection, thus allowing you to chain kernels using the + operator.
Just like velocities, diffusivities are passed to Parcels in the form of Field objects. When using DiffusionUniformKh, they should be added to the FieldSet object as constant fields, e.g. fieldset.add_constant_field("Kh_zonal", 1, mesh="flat").
To make a central difference approximation for computing the gradient in diffusivity, a resolution for this approximation dres is needed: Parcels approximates the gradients in diffusivities by using their values at the particle's location ± dres (in both $x$ and $y$). A value of dres must be specified and added to the FieldSet by the user (e.g. fieldset.add_constant("dres", 0.01)). Currently, it is unclear what the best value of dres is. From experience, the value of dres should be smaller than the spatial resolution of the data, but large enough relative to machine precision to avoid numerical errors. We are working on a method to compute gradients differently so that specifying dres is not necessary anymore.
Example: Impermeable Diffusivity Profile
Let's see the AdvectionDiffusionM1 in action and see why it's preferable over the AdvectionDiffusionEM kernel. To do so, we create an idealized profile with diffusivities $K_\text{zonal}$ uniform everywhere ($K_\text{zonal} = \bar{K}=0.5$) and $K_\text{meridional}$ constant in the zonal direction, while having the following profile in the meridional direction:
$$
K_\text{meridional}(y) = \bar{K}\frac{2(1+\alpha)(1+2\alpha)}{\alpha^2L^{1+1/\alpha}} \begin{cases}
y(L-2y)^{1/\alpha},\quad 0 \leq y \leq L/2,\\
(L-y)(2y-L)^{1/\alpha},\quad L/2 \leq y \leq L,
\end{cases}
$$
with $L$ the basin length scale and $\alpha$ a parameter determining the steepness of the gradient in the profile. This profile is similar to that used by Gräwe (2011), now used in the meridional direction for illustrative purposes.
Let's plot $K_\text{meridional}(y)$:
End of explanation
"""
xdim, ydim = (1, Ny)
data = {'U': np.zeros(ydim),
'V': np.zeros(ydim),
'Kh_zonal': K_bar*np.ones(ydim),
'Kh_meridional': Kh_meridional}
dims = {'lon': 1,
'lat': np.linspace(-0.01, 1.01, ydim, dtype=np.float32)}
fieldset = FieldSet.from_data(data, dims, mesh='flat', allow_time_extrapolation=True)
fieldset.add_constant('dres', 0.00005)
"""
Explanation: In this profile, diffusivity drops to 0 at $y=0.5$ and at $y=0$ and $y=1$. In the absence of advection, particles starting out in one half of the domain should remain confined to that half as they are unable to cross the points where the diffusivity drops to 0. The line $y=0.5$ should therefore provide an impermeable barrier.
Now we can put this idealized profile into a flat fieldset:
End of explanation
"""
def get_test_particles():
return ParticleSet.from_list(fieldset,
pclass=JITParticle,
lon=np.zeros(100),
lat=np.ones(100)*0.75,
time=np.zeros(100),
lonlatdepth_dtype=np.float64)
"""
Explanation: We release 100 particles at ($x=0$, $y=0.75$).
End of explanation
"""
dt = 0.001
testParticles = get_test_particles()
output_file = testParticles.ParticleFile(name="M1_out.nc",
outputdt=timedelta(seconds=dt))
ParcelsRandom.seed(1636) # Random seed for reproducibility
testParticles.execute(AdvectionDiffusionM1,
runtime=timedelta(seconds=0.3),
dt=timedelta(seconds=dt),
output_file=output_file,
verbose_progress=True)
output_file.close() # to write the output to a netCDF file, since `output_file` does not close automatically when using notebooks
M1_out = xr.open_dataset("M1_out.nc")
"""
Explanation: Now we will simulate the advection and diffusion of the particles using the AdvectionDiffusionM1 kernel. We run the simulation for 0.3 seconds, with a numerical timestep $\Delta t = 0.001$s. We also write out particle locations at each timestep for plotting. Note that this will hinder a runtime comparison between kernels, since it will cause most time to be spent on I/O.
End of explanation
"""
fig, ax = plt.subplots(1, 2)
fig.set_figwidth(12)
for data, ai, dim, ystart, ylim in zip([M1_out.lat, M1_out.lon], ax, ('y', 'x'), (0.75, 0), [(0, 1), (-1, 1)]):
ai.plot(np.arange(0, 0.3002, 0.001), data.T, alpha=0.3)
ai.scatter(0, ystart, s=20, c='r', zorder=3)
ai.set_xlabel("t")
ai.set_ylabel(dim)
ai.set_xlim(0, 0.3)
ai.set_ylim(ylim)
fig.suptitle("`AdvectionDiffusionM1` Simulation: Particle trajectories in the x- and y-directions against time")
plt.show()
"""
Explanation: We can plot the individual coordinates of the particle trajectories against time ($x$ against $t$ and $y$ against $t$) to investigate how diffusion works along each axis.
End of explanation
"""
dt = 0.001
testParticles = get_test_particles()
output_file = testParticles.ParticleFile(name="EM_out.nc",
outputdt=timedelta(seconds=dt))
ParcelsRandom.seed(1636) # Random seed for reproducibility
testParticles.execute(AdvectionDiffusionEM,
runtime=timedelta(seconds=0.3),
dt=timedelta(seconds=dt),
output_file=output_file,
verbose_progress=True)
output_file.close() # to write the output to a netCDF file, since `output_file` does not close automatically when using notebooks
EM_out = xr.open_dataset("EM_out.nc")
fig, ax = plt.subplots(1, 2)
fig.set_figwidth(12)
for data, ai, dim, ystart, ylim in zip([EM_out.lat, EM_out.lon], ax, ('y', 'x'), (0.75, 0), [(0, 1), (-1, 1)]):
ai.plot(np.arange(0, 0.3002, 0.001), data.T, alpha=0.3)
ai.scatter(0, ystart, s=20, c='r', zorder=3)
ai.set_xlabel("t")
ai.set_ylabel(dim)
ai.set_xlim(0, 0.3)
ai.set_ylim(ylim)
fig.suptitle("`AdvectionDiffusionEM` Simulation: Particle trajectories in the x- and y-directions against time")
plt.show()
"""
Explanation: We see that along the meridional direction, particles remain confined to the ‘upper’ part of the domain, not crossing the impermeable barrier where the diffusivity drops to zero. In the zonal direction, particles follow random walks, since all terms involving gradients of the diffusivity are zero.
Now let's execute the simulation with the AdvectionDiffusionEM kernel instead.
End of explanation
"""
def smagdiff(particle, fieldset, time):
dx = 0.01
# gradients are computed by using a local central difference.
dudx = (fieldset.U[time, particle.depth, particle.lat, particle.lon+dx]-fieldset.U[time, particle.depth, particle.lat, particle.lon-dx]) / (2*dx)
dudy = (fieldset.U[time, particle.depth, particle.lat+dx, particle.lon]-fieldset.U[time, particle.depth, particle.lat-dx, particle.lon]) / (2*dx)
dvdx = (fieldset.V[time, particle.depth, particle.lat, particle.lon+dx]-fieldset.V[time, particle.depth, particle.lat, particle.lon-dx]) / (2*dx)
dvdy = (fieldset.V[time, particle.depth, particle.lat+dx, particle.lon]-fieldset.V[time, particle.depth, particle.lat-dx, particle.lon]) / (2*dx)
A = fieldset.cell_areas[time, 0, particle.lat, particle.lon]
sq_deg_to_sq_m = (1852*60)**2*math.cos(particle.lat*math.pi/180)
A = A / sq_deg_to_sq_m
Kh = fieldset.Cs * A * math.sqrt(dudx**2 + 0.5*(dudy + dvdx)**2 + dvdy**2)
dlat = ParcelsRandom.normalvariate(0., 1.) * math.sqrt(2*math.fabs(particle.dt)* Kh)
dlon = ParcelsRandom.normalvariate(0., 1.) * math.sqrt(2*math.fabs(particle.dt)* Kh)
particle.lat += dlat
particle.lon += dlon
"""
Explanation: The Wiener increments for both simulations are equal, as they are fixed through a random seed. As we can see, the Euler-Maruyama scheme performs worse than the Milstein scheme, letting particles cross the impermeable barrier at $y=0.5$. In contrast, along the zonal direction, particles follow the same random walk as in the Milstein scheme, which is expected since the extra terms in the Milstein scheme are zero in this case.
Example: Using horizontal diffusion calculated from velocity fields
When velocity fields are available, diffusion coefficients can be calculated from closure parameterizations. The Smagorinsky method (Smagorinsky, 1963), which was originally proposed as a parameterization for horizontal eddy viscosity, is often used to parameterize horizontal eddy diffusivity as well. It computes the eddy diffusivity as
$$
K = C_s \Delta x \Delta y \sqrt{\left(\frac{\partial u}{\partial x}\right)^2 + \left(\frac{\partial v}{\partial y}\right)^2 + \frac{1}{2}\left(\frac{\partial u}{\partial y} +\frac{\partial v}{\partial x}\right)^2},
$$
where $C_s$, the Smagorinsky constant, is a dimensionless tuning parameter. It uses the grid area $\Delta x \Delta y$ as its spatial scale, and the norm of the strain rate tensor as its time scale, given as the square-rooted term.
Let’s see the example of implementation of the Smagorinsky method to the GlobalCurrents files of the region around South Africa. For simplicity, we are not taking gradients in the Smagorinsky-computed diffusivity field into account here.
First, create a new kernel for Smagorinsky diffusion method:
End of explanation
"""
filenames = {'U': 'GlobCurrent_example_data/20*.nc', 'V': 'GlobCurrent_example_data/20*.nc'}
variables = {'U': 'eastward_eulerian_current_velocity', 'V': 'northward_eulerian_current_velocity'}
dimensions = {'lat': 'lat', 'lon': 'lon', 'time': 'time'}
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions)
"""
Explanation: Reading velocity fields from netcdf files
End of explanation
"""
x = fieldset.U.grid.lon
y = fieldset.U.grid.lat
cell_areas = Field(name='cell_areas', data=fieldset.U.cell_areas(), lon=x, lat=y)
fieldset.add_field(cell_areas)
fieldset.add_constant('Cs', 0.1)
"""
Explanation: Adding the parameters needed by the smagdiff kernel (cell_areas, the areas of the computational cells, and Cs, the Smagorinsky constant) to the fieldset
End of explanation
"""
lon = 29
lat = -33
repeatdt = timedelta(hours=12)
pset = ParticleSet(fieldset=fieldset, pclass=JITParticle,
lon=lon, lat=lat,
repeatdt=repeatdt)
"""
Explanation: In the example, particles are released at one location periodically (every 12 hours)
End of explanation
"""
def DeleteParticle(particle, fieldset, time):
particle.delete()
"""
Explanation: If particles leave the model area, they are deleted
End of explanation
"""
kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(smagdiff)
output_file = pset.ParticleFile(name='Global_smagdiff.nc', outputdt=timedelta(hours=6))
pset.execute(kernels, runtime=timedelta(days=5), dt=timedelta(minutes=5), output_file=output_file, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
pset.show(field=fieldset.U)
"""
Explanation: Model the particle motion over 5 days using the advection (AdvectionRK4) and diffusion (smagdiff) kernels.
End of explanation
"""
pset.repeatdt = None
pset.execute(kernels, runtime=timedelta(days=25), dt=timedelta(minutes=5), output_file=output_file, recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
pset.show(field=fieldset.U)
"""
Explanation: Stop releasing new particles and continue the particleset execution for another 25 days
End of explanation
"""
output_file.export()
plotTrajectoriesFile('Global_smagdiff.nc',
tracerfile='GlobCurrent_example_data/20020120000000-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc',
tracerlon='lon', tracerlat='lat', tracerfield='eastward_eulerian_current_velocity');
"""
Explanation: Save the output file and visualise the trajectories
End of explanation
"""
|
changshuaiwei/Udc-ML
|
creating_customer_segments/customer_segments.ipynb
|
gpl-3.0
|
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import renders as rs
from IPython.display import display # Allows the use of display() for DataFrames
# Show matplotlib plots inline (nicely formatted in the notebook)
%matplotlib inline
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
"""
Explanation: Machine Learning Engineer Nanodegree
Unsupervised Learning
Project 3: Creating Customer Segments
Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Getting Started
In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in monetary units) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
The dataset for this project can be found on the UCI Machine Learning Repository. For the purposes of this project, the features 'Channel' and 'Region' will be excluded in the analysis — with focus instead on the six product categories recorded for customers.
Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
End of explanation
"""
# Display a description of the dataset
display(data.describe())
"""
Explanation: Data Exploration
In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.
Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: 'Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', and 'Delicatessen'. Consider what each category represents in terms of products you could purchase.
End of explanation
"""
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [30,60,90]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
display(samples - data.median().round())
"""
Explanation: Implementation: Selecting Samples
To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add three indices of your choice to the indices list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.
End of explanation
"""
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
new_data = data.drop('Milk', axis = 1)
from sklearn import cross_validation
# TODO: Split the data into training and testing sets using the given feature as the target
X_train, X_test, y_train, y_test = cross_validation.train_test_split(new_data, data['Milk'], test_size =0.25, random_state = 0)
from sklearn import tree
# TODO: Create a decision tree regressor and fit it to the training set
regressor = tree.DecisionTreeRegressor(min_samples_leaf = 20, random_state = 0)
regressor.fit(X_train, y_train)
from sklearn import metrics
# TODO: Report the score of the prediction using the testing set
score = metrics.r2_score(y_test, regressor.predict(X_test))
print 'R2 score is {}'.format(score)
"""
Explanation: Question 1
Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
What kind of establishment (customer) could each of the three samples you've chosen represent?
Hint: Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying "McDonalds" when describing a sample customer as a restaurant.
Answer: By comparing the spending with the median spending, customers 0 and 1 spend more on Grocery and Detergents_Paper, and thus are more like retailers. Customer 2 spends more on fresh and frozen food and thus is more like a restaurant.
Implementation: Feature Relevance
One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.
In the code block below, you will need to implement the following:
- Assign new_data a copy of the data by removing a feature of your choice using the DataFrame.drop function.
- Use sklearn.cross_validation.train_test_split to split the dataset into training and testing sets.
- Use the removed feature as your target label. Set a test_size of 0.25 and set a random_state.
- Import a decision tree regressor, set a random_state, and fit the learner to the training data.
- Report the prediction score of the testing set using the regressor's score function.
End of explanation
"""
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
data.corr()
data.corr('spearman')
"""
Explanation: Question 2
Which feature did you attempt to predict? What was the reported prediction score? Is this feature necessary for identifying customers' spending habits?
Hint: The coefficient of determination, R^2, is scored between 0 and 1, with 1 being a perfect fit. A negative R^2 implies the model fails to fit the data.
Answer: I attempted to predict Milk. The reported R2 is 0.556. Although 'Milk' can be partially predicted from the other features, it is still necessary for identifying customers' spending habits, because the other features cannot predict it well.
Visualize Feature Distributions
To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix.
End of explanation
"""
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
"""
Explanation: Question 3
Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?
Hint: Is the data normally distributed? Where do most of the data points lie?
Answer: The pair 'Detergents_Paper' and 'Grocery' seems to have a high correlation. The pairs {'Milk', 'Grocery'} and {'Milk', 'Detergents_Paper'} also show some degree of correlation.
This agrees with my conclusion about the relevance of 'Milk' to the other features: they are somewhat related, but 'Milk' contains information that cannot be well represented by the other features.
The data is not normally distributed; most features have skewed distributions.
Data Preprocessing
In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.
Implementation: Feature Scaling
If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most often appropriate to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a Box-Cox test, which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
In the code block below, you will need to implement the following:
- Assign a copy of the data to log_data after applying a logarithm scaling. Use the np.log function for this.
- Assign a copy of the sample data to log_samples after applying a logarithm scaling. Again, use np.log.
End of explanation
"""
# Display the log-transformed sample data
display(log_samples)
"""
Explanation: Observation
After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).
Run the code below to see how the sample data has changed after having the natural logarithm applied to it.
End of explanation
"""
# For each feature find the data points with extreme high or low values
count = np.zeros(len(log_data.index))
for feature in log_data.keys():
# TODO: Calculate Q1 (25th percentile of the data) for the given feature
Q1 = np.percentile(log_data[feature], q=25)
# TODO: Calculate Q3 (75th percentile of the data) for the given feature
Q3 = np.percentile(log_data[feature], q=75)
# TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
step = 1.5 * (Q3 - Q1)
# Display the outliers
print "Data points considered outliers for the feature '{}':".format(feature)
display(log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))])
count += ((log_data[feature] < Q1 - step) | (log_data[feature] > Q3 + step))
# OPTIONAL: Select the indices for data points you wish to remove
row_outlier = np.where(count>1)
print "Data points considered outliers more than once '{}':".format(row_outlier)
outliers = row_outlier
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
"""
Explanation: Implementation: Outlier Detection
Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use Tukey's Method for identifying outliers: An outlier step is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
In the code block below, you will need to implement the following:
- Assign the value of the 25th percentile for the given feature to Q1. Use np.percentile for this.
- Assign the value of the 75th percentile for the given feature to Q3. Again, use np.percentile.
- Assign the calculation of an outlier step for the given feature to step.
- Optionally remove data points from the dataset by adding indices to the outliers list.
NOTE: If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
Once you have performed this implementation, the dataset will be stored in the variable good_data.
End of explanation
"""
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
from sklearn import decomposition
pca = decomposition.PCA(n_components=6)
pca.fit(good_data)
# TODO: Transform the sample log-data using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = rs.pca_results(good_data, pca)
"""
Explanation: Question 4
Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset? If any data points were added to the outliers list to be removed, explain why.
Answer: Yes, there are 5 data points considered outliers for more than one feature. These data points should be removed: if a data point is an outlier in more than one feature, it may suffer from severe measurement error or simply be implausible.
Feature Transformation
In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.
Implementation: PCA
Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the good_data to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the explained variance ratio of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.
In the code block below, you will need to implement the following:
- Import sklearn.decomposition.PCA and assign the results of fitting PCA in six dimensions with good_data to pca.
- Apply a PCA transformation of the sample log-data log_samples using pca.transform, and assign the results to pca_samples.
End of explanation
"""
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
"""
Explanation: Question 5
How much variance in the data is explained in total by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.
Hint: A positive increase in a specific dimension corresponds with an increase of the positive-weighted features and a decrease of the negative-weighted features. The rate of increase or decrease is based on the indivdual feature weights.
Answer: The 1st and 2nd PCs explain 0.7068 of the variance. The first 4 PCs explain 0.9311 of the variance. The 1st PC represents a spending pattern with positive weights on Detergents_Paper, Milk and Grocery, and negative weights on Fresh and Frozen. That is to say, customers with a large value on the 1st PC spend a lot on Detergents_Paper, Milk and Grocery and little on Fresh and Frozen, while customers with a small value on the 1st PC spend little on Detergents_Paper, Milk and Grocery and a lot on Fresh and Frozen. The 2nd PC represents a spending pattern with heavy positive weights on Fresh, Frozen and Delicatessen: customers with a large value on the 2nd PC spend a lot on Fresh, Frozen and Delicatessen, and customers with a small value spend little on them. The 3rd PC represents a spending pattern with a positive weight on Delicatessen and a negative weight on Fresh: customers with a large value on the 3rd PC spend a lot on Delicatessen and little on Fresh, and vice versa for those with a small value. The 4th PC represents a spending pattern with a positive weight on Frozen and a negative weight on Delicatessen: customers with a large value on the 4th PC spend a lot on Frozen and little on Delicatessen, and vice versa for those with a small value.
Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.
End of explanation
"""
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = decomposition.PCA(n_components=2)
pca.fit(good_data)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform the sample log-data using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# Produce a scatter matrix for pca reduced data
pd.scatter_matrix(reduced_data, alpha = 0.8, figsize = (8,4), diagonal = 'kde');
"""
Explanation: Implementation: Dimensionality Reduction
When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the cumulative explained variance ratio is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a significant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.
In the code block below, you will need to implement the following:
- Assign the results of fitting PCA in two dimensions with good_data to pca.
- Apply a PCA transformation of good_data using pca.transform, and assign the results to reduced_data.
- Apply a PCA transformation of the sample log-data log_samples using pca.transform, and assign the results to pca_samples.
End of explanation
"""
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
"""
Explanation: Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remain unchanged when compared to a PCA transformation in six dimensions.
End of explanation
"""
# TODO: Apply your clustering algorithm of choice to the reduced data
from sklearn import mixture
from sklearn import cluster
for i in range(4,1,-1):
clusterer = cluster.KMeans(i, random_state=0).fit(reduced_data)
# Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# Calculate the mean silhouette coefficient for the number of clusters chosen
score = metrics.silhouette_score(reduced_data, preds)
print i, 'clusters:', score.round(5)
# TODO: Apply your clustering algorithm of choice to the reduced data
from sklearn import mixture
from sklearn import cluster
#clusterer = mixture.GMM(n_components=2, random_state=0 )
clusterer = cluster.KMeans(n_clusters=2, random_state=0)
clusterer.fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
#centers = clusterer.means_
centers = clusterer.cluster_centers_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
score = metrics.silhouette_score(reduced_data, preds, random_state=0)
print 'silhouette score is {}'.format(score)
"""
Explanation: Clustering
In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.
Question 6
What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?
Answer: K-Means clustering is very fast and always converges, but it may converge to a local minimum. GMM is also fast, though not as fast as K-Means, and it has a probabilistic interpretation of cluster assignments. I would use K-Means, since it always converges and runs faster.
Implementation: Creating Clusters
Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known a priori, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's silhouette coefficient. The silhouette coefficient for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the mean silhouette coefficient provides for a simple scoring method of a given clustering.
In the code block below, you will need to implement the following:
- Fit a clustering algorithm to the reduced_data and assign it to clusterer.
- Predict the cluster for each data point in reduced_data using clusterer.predict and assign them to preds.
- Find the cluster centers using the algorithm's respective attribute and assign them to centers.
- Predict the cluster for each sample data point in pca_samples and assign them sample_preds.
- Import sklearn.metrics.silhouette_score and calculate the silhouette score of reduced_data against preds.
- Assign the silhouette score to score and print the result.
End of explanation
"""
# Display the results of the clustering from implementation
rs.cluster_results(reduced_data, preds, centers, pca_samples)
"""
Explanation: Question 7
Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?
Answer: I tried n_clusters = 2, 3 and 4, with silhouette scores of 0.426, 0.397 and 0.332 respectively. n_clusters = 2 has the best silhouette score.
Cluster Visualization
Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
End of explanation
"""
# TODO: Inverse transform the centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
display(true_centers - (np.exp(good_data)).mean().round())
display(true_centers - (np.exp(good_data)).median().round())
"""
Explanation: Implementation: Data Recovery
Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the averages of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to the average customer of that segment. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
In the code block below, you will need to implement the following:
- Apply the inverse transform to centers using pca.inverse_transform and assign the new centers to log_centers.
- Apply the inverse function of np.log to log_centers using np.exp and assign the true centers to true_centers.
End of explanation
"""
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
"""
Explanation: Question 8
Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. What set of establishments could each of the customer segments represent?
Hint: A customer who is assigned to 'Cluster X' should best identify with the establishments represented by the feature set of 'Segment X'.
Answer: By comparing with the mean and median of the original data, we see that data points assigned to cluster 0 have more than average spending on Milk, Grocery and Detergents_Paper and less than average spending on Fresh and Frozen, and thus likely represent a 'Retailer' cluster. Data points assigned to cluster 1 have less than average spending on Milk, Grocery and Detergents_Paper and more than average spending on Fresh and Frozen, and thus likely represent a 'Restaurant' cluster.
Question 9
For each sample point, which customer segment from Question 8 best represents it? Are the predictions for each sample point consistent with this?
Run the code block below to find which cluster each sample point is predicted to be.
End of explanation
"""
display(samples - (np.exp(good_data)).median().round())
"""
Explanation: Answer: Sample point 0 predicted to be in Cluster 0,
Sample point 1 predicted to be in Cluster 0,
Sample point 2 predicted to be in Cluster 1. The predictions are consistent. Samples 0 and 1 have above-median spending on Grocery and Detergents_Paper and below-median spending on Frozen, and are thus closer to the center of cluster 0. Sample 2 has below-median spending on Grocery and Detergents_Paper and above-median spending on Frozen, and is thus closer to the center of cluster 1.
End of explanation
"""
# Display the clustering results based on 'Channel' data
rs.channel_results(reduced_data, outliers, pca_samples)
"""
Explanation: Conclusion
In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the customer segments, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which segment that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the customer segments to a hidden variable present in the data, to see whether the clustering identified certain relationships.
Question 10
Companies will often run A/B tests when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?
Hint: Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most?
Answer: The wholesale distributor can first cluster its customers into several groups, for example the 2 clusters above. It can then run an A/B test within each group to test whether the small change affects customers positively or negatively. Yes, we can determine which group it affects the most by comparing the difference measured for each group (with consideration of multiple testing).
Question 11
Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a customer segment it best identifies with (depending on the clustering algorithm applied), we can consider 'customer segment' as an engineered feature for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a customer segment to determine the most appropriate delivery service.
How can the wholesale distributor label the new customers using only their estimated product spending and the customer segment data?
Hint: A supervised learner could be used to train on the original customers. What would be the target variable?
Answer: Without knowing the clustering algorithm applied to the original data, we can treat the product spending as features and the 'customer segment' as labels. Then we can use a supervised learner, e.g., an SVM, to learn the clustering assignment and use it to predict the 'customer segment' for the new customers.
Visualizing Underlying Distributions
At the beginning of this project, it was discussed that the 'Channel' and 'Region' features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the 'Channel' feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.
Run the code block below to see how each data point is labeled either 'HoReCa' (Hotel/Restaurant/Cafe) or 'Retail' in the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.
End of explanation
"""
|
AdityaSoni19031997/Machine-Learning
|
cmu/pytorch_tutorial_gpu.ipynb
|
mit
|
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import time
print(torch.__version__)
%matplotlib inline
def sample_points(n):
# returns (X,Y), where X of shape (n,2) is the numpy array of points and Y is the (n) array of classes
radius = np.random.uniform(low=0,high=2,size=n).reshape(-1,1) # uniform radius between 0 and 2
angle = np.random.uniform(low=0,high=2*np.pi,size=n).reshape(-1,1) # uniform angle
x1 = radius*np.cos(angle)
x2=radius*np.sin(angle)
y = (radius<1).astype(int).reshape(-1)
x = np.concatenate([x1,x2],axis=1)
return x,y
# Generate the data
trainx,trainy = sample_points(10000)
valx,valy = sample_points(500)
testx,testy = sample_points(500)
print(trainx.shape,trainy.shape)
"""
Explanation: A training example in Pytorch
Introduction
Task
In this notebook we will train a neural network to do a simple task. This will be a classification task: as explained in the first week of lectures, classification basically means finding a decision boundary over a space of real numbers. For representation purposes we will work with a 2D example: the decision boundary will be a circle. More precisely, it will be the unit circle in the plane.
Sampling
We will generate points $(x_1,x_2)$ to classify, and their class $y$. The actual decision function is $y=1_{x_1^2+x_2^2<1}$.
To have a balanced dataset with about as many points in each class, we will sample uniformly over polar coordinates, within the circle of center 0 and radius 2.
End of explanation
"""
def generate_single_hidden_MLP(n_hidden_neurons):
return nn.Sequential(nn.Linear(2,n_hidden_neurons),nn.ReLU(),nn.Linear(n_hidden_neurons,2))
model1 = generate_single_hidden_MLP(6)
"""
Explanation: Our model will be a multi-layer perceptron with one hidden layer, and an output of size 2 since we have two classes. Since it is a binary classification task we could also use just one output and a zero threshold, but we will use two to illustrate the use of the pytorch Cross-Entropy loss (with one output, you would use BinaryCrossEntropy).
As you know from the lectures, such a model cannot represent a circular boundary but can represent a polygonal boundary, whose number of sides is the number of neurons in the hidden layer. For example, with 6 hidden neurons the model could compute a hexagonal boundary that approximates the unit circle, such as:
Of course the trained model won't compute an actual hexagon, due to the activation that isn't a threshold, and the freedom of the final layer's weights (it does not have to be an AND). We can actually expect better accuracy than what a hexagon could do.
End of explanation
"""
trainx = torch.from_numpy(trainx).float()
valx = torch.from_numpy(valx).float()
testx = torch.from_numpy(testx).float()
trainy = torch.from_numpy(trainy).long()
valy = torch.from_numpy(valy).long()
testy = torch.from_numpy(testy).long()
print(trainx.type(),trainy.type())
"""
Explanation: To train our model, we will need to feed it with tensors. Let's transform our generated numpy arrays :
End of explanation
"""
def training_routine(net,dataset,n_iters,gpu):
# organize the data
train_data,train_labels,val_data,val_labels = dataset
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(),lr=0.01)
# use the flag
if gpu:
train_data,train_labels = train_data.cuda(),train_labels.cuda()
val_data,val_labels = val_data.cuda(),val_labels.cuda()
net = net.cuda() # the network parameters also need to be on the gpu !
print("Using GPU")
else:
print("Using CPU")
for i in range(n_iters):
# forward pass
train_output = net(train_data)
train_loss = criterion(train_output,train_labels)
# backward pass and optimization
train_loss.backward()
optimizer.step()
optimizer.zero_grad()
# Once every 100 iterations, print statistics
if i%100==0:
print("At iteration",i)
# compute the accuracy of the prediction
train_prediction = train_output.cpu().detach().argmax(dim=1)
train_accuracy = (train_prediction.numpy()==train_labels.cpu().numpy()).mean()
# Now for the validation set
val_output = net(val_data)
val_loss = criterion(val_output,val_labels)
# compute the accuracy of the prediction
val_prediction = val_output.cpu().detach().argmax(dim=1)
val_accuracy = (val_prediction.numpy()==val_labels.cpu().numpy()).mean()
print("Training loss :",train_loss.cpu().detach().numpy())
print("Training accuracy :",train_accuracy)
print("Validation loss :",val_loss.cpu().detach().numpy())
print("Validation accuracy :",val_accuracy)
net = net.cpu()
dataset = trainx,trainy,valx,valy
gpu = True
gpu = gpu and torch.cuda.is_available() # to know if you actually can use the GPU
begin = time.time()
training_routine(model1,dataset,10000,gpu)
end=time.time()
print("Training time :",end-begin)
# Let's try with 3 hidden neurons.
model2 = generate_single_hidden_MLP(3)
training_routine(model2,dataset,10000,gpu)
out = model2(testx).argmax(dim=1).detach().numpy()
green = testx.numpy()[np.where(out==1)]
red = testx.numpy()[np.where(out==0)]
print(green.shape,red.shape)
def print_model(model,datapoints):
out = model(datapoints).argmax(dim=1).detach().numpy()
green = datapoints.numpy()[np.where(out==1)]
red = datapoints.numpy()[np.where(out==0)]
circle1 = plt.Circle((0, 0), 1, color='y')
circle2 = plt.Circle((0, 0), 1, color='b',fill=False)
fig, ax = plt.subplots() # note we must use plt.subplots, not plt.subplot
# (or if you have an existing figure)
# fig = plt.gcf()
# ax = fig.gca()
plt.xlim((-2,2))
plt.ylim((-2,2))
pos_values = plt.scatter(x=green[:,0],y=green[:,1], color='g',)
neg_values = plt.scatter(x=red[:,0],y=red[:,1], color='r',)
ax.add_artist(circle1)
ax.add_artist(circle2)
ax.add_artist(pos_values)
ax.add_artist(neg_values)
print_model(model1,testx)
print_model(model2,testx)
model3 = generate_single_hidden_MLP(2)
training_routine(model3,dataset,10000,gpu)
print_model(model3,testx)
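# A small sketch (not in the original notebook): evaluate the trained models on
# the held-out test set (testx, testy) generated earlier.
def test_accuracy(net, datapoints, labels):
    preds = net(datapoints).argmax(dim=1).detach().numpy()
    return (preds == labels.numpy()).mean()

for name, net in [("model1", model1), ("model2", model2), ("model3", model3)]:
    print("{} test accuracy: {:.3f}".format(name, test_accuracy(net, testx, testy)))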
"""
Explanation: Now we will define our training routine. There is the question of whether to perform our training on CPU or GPU. The best thing to do is to use a flag variable that you set when you actually do the training.
End of explanation
"""
|
tensorflow/docs-l10n
|
site/ko/guide/variable.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
# Uncomment to see where your variables get placed (see below)
# tf.debugging.set_log_device_placement(True)
"""
Explanation: Introduction to Variables
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/guide/variable"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서소스 보기</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/variable.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드하기</a></td>
</table>
TensorFlow variables are the recommended way to represent shared, persistent state your program manipulates. This guide covers how to create, update, and manage instances of tf.Variable in TensorFlow.
Variables are created and tracked via the tf.Variable class. A tf.Variable represents a tensor whose value can be changed by running ops on it. Specific ops allow you to read and modify the values of this tensor. Higher-level libraries like tf.keras use tf.Variable to store model parameters.
Setup
This notebook discusses variable placement. If you want to see on what device your variables are placed, uncomment this line.
End of explanation
"""
my_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])
my_variable = tf.Variable(my_tensor)
# Variables can be all kinds of types, just like tensors
bool_variable = tf.Variable([False, False, False, True])
complex_variable = tf.Variable([5 + 4j, 6 + 1j])
"""
Explanation: Creating a variable
To create a variable, provide an initial value. The tf.Variable will have the same dtype as the initialization value.
End of explanation
"""
print("Shape: ",my_variable.shape)
print("DType: ",my_variable.dtype)
print("As NumPy: ", my_variable.numpy)
"""
Explanation: A variable looks and acts like a tensor, and, in fact, is a data structure backed by a tf.Tensor. Like tensors, they have a dtype and a shape, and can be exported to NumPy.
End of explanation
"""
print("A variable:",my_variable)
print("\nViewed as a tensor:", tf.convert_to_tensor(my_variable))
print("\nIndex of highest value:", tf.argmax(my_variable))
# This creates a new tensor; it does not reshape the variable.
print("\nCopying and reshaping: ", tf.reshape(my_variable, ([1,4])))
"""
Explanation: Most tensor operations work on variables as expected, although variables cannot be reshaped.
End of explanation
"""
a = tf.Variable([2.0, 3.0])
# This will keep the same dtype, float32
a.assign([1, 2])
# Not allowed as it resizes the variable:
try:
a.assign([1.0, 2.0, 3.0])
except Exception as e:
print(f"{type(e).__name__}: {e}")
"""
Explanation: As noted above, variables are backed by tensors. You can reassign the tensor using tf.Variable.assign. Calling assign does not (usually) allocate a new tensor; instead, the existing tensor's memory is reused.
End of explanation
"""
a = tf.Variable([2.0, 3.0])
# Create b based on the value of a
b = tf.Variable(a)
a.assign([5, 6])
# a and b are different
print(a.numpy())
print(b.numpy())
# There are other versions of assign
print(a.assign_add([2,3]).numpy()) # [7. 9.]
print(a.assign_sub([7,9]).numpy()) # [0. 0.]
"""
Explanation: If you use a variable like a tensor in operations, you will usually operate on the backing tensor.
Creating new variables from existing variables duplicates the backing tensors. Two variables will not share the same memory.
End of explanation
"""
# Create a and b; they have the same value but are backed by different tensors.
a = tf.Variable(my_tensor, name="Mark")
# A new variable with the same name, but different value
# Note that the scalar add is broadcast
b = tf.Variable(my_tensor + 1, name="Mark")
# These are elementwise-unequal, despite having the same name
print(a == b)
"""
Explanation: Lifecycles, naming, and watching
In Python-based TensorFlow, tf.Variable instances have the same lifecycle as other Python objects. When there are no references to a variable, it is automatically deallocated.
Variables can also be named, which will help you track and debug them. You can give two variables the same name.
End of explanation
"""
step_counter = tf.Variable(1, trainable=False)
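# A small sketch (not part of the original guide): trainable variables are
# watched automatically by tf.GradientTape, while a non-trainable variable is
# not watched and therefore yields a None gradient.
w = tf.Variable(3.0)                         # trainable by default
counter = tf.Variable(0.0, trainable=False)  # e.g. a step counter
with tf.GradientTape() as tape:
    loss = w * w + counter
print(tape.gradient(loss, [w, counter]))  # [tf.Tensor(6.0, ...), None]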
"""
Explanation: Variable names are preserved when saving and loading models. By default, variables in models will acquire unique variable names automatically, so you don't need to assign them yourself unless you want to.
Although variables are important for differentiation, some variables will not need to be differentiated. You can turn off gradients for a variable by setting trainable to false at creation. An example of a variable that would not need gradients is a training step counter.
End of explanation
"""
with tf.device('CPU:0'):
# Create some tensors
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
c = tf.matmul(a, b)
print(c)
"""
Explanation: Placing variables and tensors
For better performance, TensorFlow will attempt to place tensors and variables on the fastest device compatible with their dtype. This means most variables are placed on a GPU if one is available.
However, you can override this. In the following snippet, we place a float tensor and a variable on the CPU, even if a GPU is available. By turning on device placement logging (see Setup), you can see where the variables are placed.
Note: Although manual placement works, using distribution strategies can be a more convenient and scalable way to optimize your computation.
If you run this notebook on different backends with and without a GPU you will see different logging. Note that logging device placement must be turned on at the start of the session.
End of explanation
"""
with tf.device('CPU:0'):
a = tf.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
b = tf.Variable([[1.0, 2.0, 3.0]])
with tf.device('GPU:0'):
# Element-wise multiply
k = a * b
print(k)
"""
Explanation: It's possible to set the location of a variable or tensor on one device and do the computation on another device. This will introduce delay, as data needs to be copied between the devices.
You might do this, however, if you had multiple GPU workers but only want one copy of the variables.
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion
|
notebooks/building_production_ml_systems/labs/3_kubeflow_pipelines.ipynb
|
apache-2.0
|
!pip3 install --user kfp --upgrade
"""
Explanation: Kubeflow pipelines
Learning Objectives:
1. Learn how to deploy a Kubeflow cluster on GCP
1. Learn how to create an experiment in Kubeflow
1. Learn how to package your code into a Kubeflow pipeline
1. Learn how to run a Kubeflow pipeline in a repeatable and traceable way
Introduction
In this notebook, we will first set up a Kubeflow cluster on GCP.
Then, we will create a Kubeflow experiment and a Kubeflow pipeline from our taxifare machine learning code. Finally, we will run the pipeline on the Kubeflow cluster, providing us with a reproducible and traceable way to execute machine learning code.
End of explanation
"""
from os import path
import kfp
import kfp.compiler as compiler
import kfp.components as comp
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.notebook
"""
Explanation: Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
Import libraries and define constants
End of explanation
"""
HOST = "" # TODO: fill in the HOST information for the cluster
"""
Explanation: Setup a Kubeflow cluster on GCP
TODO 1
To deploy a Kubeflow cluster
in your GCP project, use the AI Platform pipelines:
Go to AI Platform Pipelines in the GCP Console.
Create a new instance
Hit "Configure"
Check the box "Allow access to the following Cloud APIs"
Hit "Create Cluster"
Hit "Deploy"
When the cluster is ready, go back to the AI Platform pipelines page and click on "SETTINGS" entry for your cluster.
This will bring up a pop up with code snippets on how to access the cluster
programmatically.
Copy the "host" entry and set the "HOST" variable below with that.
End of explanation
"""
# Change below if necessary
PROJECT = !gcloud config get-value project # noqa: E999
PROJECT = PROJECT[0]
BUCKET = PROJECT # change if needed
CLUSTER = "cluster-1" # change if needed
ZONE = "us-central1-a" # change if needed
NAMESPACE = "default" # change if needed
%env PROJECT=$PROJECT
%env CLUSTER=$CLUSTER
%env ZONE=$ZONE
%env NAMESPACE=$NAMESPACE
# Configure kubectl to connect with the cluster
!gcloud container clusters get-credentials "$CLUSTER" --zone "$ZONE" --project "$PROJECT"
"""
Explanation: Authenticate your KFP cluster with a Kubernetes secret
If you run pipelines that require calling any GCP services, you need to set the application default credential to a pipeline step by mounting the proper GCP service account token as a Kubernetes secret.
First point your kubectl current context to your cluster. Go back to your Kubeflow cluster dashboard or navigate to Navigation menu > AI Platform > Pipelines and look to see the cluster name, zone and namespace for the pipeline you deployed above. It's likely called cluster-1 if this is the first AI Pipelines you've created.
End of explanation
"""
%%bash
gcloud iam service-accounts keys create application_default_credentials.json \
--iam-account kfpdemo@$PROJECT.iam.gserviceaccount.com
# Check that the key was downloaded correctly.
!ls application_default_credentials.json
# Create a k8s secret. If already exists, override.
!kubectl create secret generic user-gcp-sa \
--from-file=user-gcp-sa.json=application_default_credentials.json \
-n $NAMESPACE --dry-run=client -o yaml | kubectl apply -f -
"""
Explanation: We'll create a service account called kfpdemo with the necessary IAM permissions for our cluster secret. We'll give this service account permissions for any GCP services it might need. This taxifare pipeline needs access to Cloud Storage, so we'll give it the storage.admin and ml.admin roles. Open a Cloud Shell and copy/paste this code into the terminal there.
```bash
PROJECT=$(gcloud config get-value project)
# Create service account
gcloud iam service-accounts create kfpdemo \
--display-name kfpdemo --project $PROJECT
# Grant permissions to the service account by binding roles
gcloud projects add-iam-policy-binding $PROJECT \
--member=serviceAccount:kfpdemo@$PROJECT.iam.gserviceaccount.com \
--role=roles/storage.admin
gcloud projects add-iam-policy-binding $PROJECT \
--member=serviceAccount:kfpdemo@$PROJECT.iam.gserviceaccount.com \
--role=roles/ml.admin
```
Then, we'll create and download a key for this service account and store the service account credential as a Kubernetes secret called user-gcp-sa in the cluster.
End of explanation
"""
client = # TODO: create a Kubeflow client
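# One possible completion (a sketch, not the official lab solution): connect to
# the cluster using the HOST url copied from the Pipelines settings page.
client = kfp.Client(host=HOST)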
"""
Explanation: Create an experiment
TODO 2
We will start by creating a Kubeflow client to pilot the Kubeflow cluster:
End of explanation
"""
client.list_experiments()
"""
Explanation: Let's look at the experiments that are running on this cluster. Since you just launched it, you should see only a single "Default" experiment:
End of explanation
"""
exp = # TODO: create an experiment called 'taxifare'
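# One possible completion (a sketch): create the 'taxifare' experiment.
exp = client.create_experiment(name='taxifare')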
"""
Explanation: Now let's create a 'taxifare' experiment where we could look at all the various runs of our taxifare pipeline:
End of explanation
"""
client.list_experiments()
"""
Explanation: Let's make sure the experiment has been created correctly:
End of explanation
"""
# Builds the taxifare trainer container in case you skipped the optional part
# of lab 1
!taxifare/scripts/build.sh
# Pushes the taxifare trainer container to gcr/io
!taxifare/scripts/push.sh
# Builds the KF component containers and push them to gcr/io
!cd pipelines && make components
"""
Explanation: Packaging your code into Kubeflow components
We have packaged our taxifare ml pipeline into three components:
* ./components/bq2gcs that creates the training and evaluation data from BigQuery and exports it to GCS
* ./components/trainjob that launches the training container on AI-platform and exports the model
* ./components/deploymodel that deploys the trained model to AI-platform as a REST API
Each of these components has been wrapped into a Docker container, in the same way we did with the taxifare training code in the previous lab.
If you inspect the code in these folders, you'll notice that the main.py or main.sh files contain the code we previously executed in the notebooks (loading the data to GCS from BQ, or launching a training job to AI-platform, etc.). The last line in the Dockerfile tells you that these files are executed when the container is run.
So we just packaged our ml code into light container images for reproducibility.
We have made it simple for you to build the container images and push them to the Google Cloud image registry gcr.io in your project:
End of explanation
"""
%%writefile bq2gcs.yaml
name: bq2gcs
description: |
  This component creates the training and
  validation datasets as BigQuery tables and exports
  them into a Google Cloud Storage bucket at
  gs://<BUCKET>/taxifare/data.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-bq2gcs you just created
args: ["--bucket", {inputValue: Input Bucket}]
%%writefile trainjob.yaml
name: trainjob
description: |
  This component trains a model to predict the taxi fare in NY.
It takes as argument a GCS bucket and expects its training and
eval data to be at gs://<BUCKET>/taxifare/data/ and will export
the trained model at gs://<BUCKET>/taxifare/model/.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-trainjob you just created
args: [{inputValue: Input Bucket}]
%%writefile deploymodel.yaml
name: deploymodel
description: |
This component deploys a trained taxifare model on GCP as taxifare:dnn.
It takes as argument a GCS bucket and expects the model to deploy
to be found at gs://<BUCKET>/taxifare/model/export/savedmodel/
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-deployment you just created
args: [{inputValue: Input Bucket}]
"""
Explanation: Now that the container images are pushed to the registry in your project, we need to create yaml files describing to Kubeflow how to use these containers. It boils down essentially to
* describing what arguments Kubeflow needs to pass to the containers when it runs them
* telling Kubeflow where to fetch the corresponding Docker images
In the cells below, we have three of these "Kubeflow component description files", one for each of our components.
TODO 3
IMPORTANT: Modify the image URI in the cell
below to reflect that you pushed the images into the gcr.io associated with your project.
End of explanation
"""
# TODO 3
PIPELINE_TAR = "taxifare.tar.gz"
BQ2GCS_YAML = "./bq2gcs.yaml"
TRAINJOB_YAML = "./trainjob.yaml"
DEPLOYMODEL_YAML = "./deploymodel.yaml"
@dsl.pipeline(
name="Taxifare",
description="Train a ml model to predict the taxi fare in NY",
)
def pipeline(gcs_bucket_name="<bucket where data and model will be exported>"):
bq2gcs_op = comp.load_component_from_file(BQ2GCS_YAML)
bq2gcs = bq2gcs_op(
input_bucket=gcs_bucket_name,
)
trainjob_op = # TODO: Load the yaml file for training
trainjob = # TODO: Add your code to run the training job
deploymodel_op = # TODO: Load the yaml file for deployment
deploymodel = # TODO: Addd your code to run model deployment
# TODO: Add the code to run 'trainjob' after 'bq2gcs' in the pipeline
# TODO: Add the code to run 'deploymodel' after 'trainjob' in the pipeline
"""
Explanation: Create a Kubeflow pipeline
The code below creates a kubeflow pipeline by decorating a regular function with the
@dsl.pipeline decorator. Now the arguments of this decorated function will be
the input parameters of the Kubeflow pipeline.
Inside the function, we describe the pipeline by
* loading the yaml component files we created above into a Kubeflow op
* specifying the order into which the Kubeflow ops should be run
End of explanation
"""
# TODO: Compile the pipeline function above
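# One possible completion (a sketch): compile a completed pipeline function
# (here, the sketch above) into the tar.gz artifact referenced by PIPELINE_TAR.
compiler.Compiler().compile(taxifare_pipeline_sketch, PIPELINE_TAR)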
ls $PIPELINE_TAR
"""
Explanation: The pipeline function above is then used by the Kubeflow compiler to create a Kubeflow pipeline artifact that can be either uploaded to the Kubeflow cluster from the UI, or programmatically, as we will do below:
End of explanation
"""
# TODO 4
run = client.run_pipeline(
experiment_id= # TODO: Add code for experiment id
job_name= # TODO: Provide a jobname
pipeline_package_path= # TODO: Add code for pipeline zip file
params={
"gcs_bucket_name": BUCKET,
},
)
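# One possible completion (a sketch): reference the experiment created earlier
# and the compiled package; the parameter name must match the pipeline signature.
run = client.run_pipeline(
    experiment_id=exp.id,
    job_name="taxifare",
    pipeline_package_path=PIPELINE_TAR,
    params={"gcs_bucket_name": BUCKET},
)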
"""
Explanation: If you untar and unzip this pipeline artifact, you'll see that the compiler has transformed the
Python description of the pipeline into a yaml description!
Now let's feed Kubeflow with our pipeline and run it using our client:
End of explanation
"""
|
FordyceLab/AcqPack
|
examples/.ipynb_checkpoints/imaging_and_gui-checkpoint.ipynb
|
mit
|
# test image stack
arr = []
for i in range(50):
b = np.random.rand(500,500)
b= (b*(2**16-1)).astype('uint16')
arr.append(b)
# snap (MPL)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
img=arr.pop()
plt.imshow(img, cmap='gray')
display.clear_output(wait=True)
display.display(plt.gcf())
button.on_click(on_button_clicked)
# snap (CV2)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
img=arr.pop()
cv2.imshow('Video',img)
cv2.waitKey(30)
button.on_click(on_button_clicked)
"""
Explanation: Snap
End of explanation
"""
# test image stack
a = []
for i in range(50):
b = np.zeros((500,500))
b[i:i+100, i:i+100]=1.0
b=b*255
b=b.astype('uint8')
a.append(b)
# video (MPL) (slow, doesn't work well)
# for img in a:
# plt.imshow(img, cmap='gray')
# display.clear_output(wait=True)
# display.display(plt.gcf())
# video (CV2)
cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
for img in a:
b = cv2.imshow('Video',img)
cv2.resizeWindow('Video', 500,500)
cv2.moveWindow('Video',0,0)
display.clear_output(wait=True)
print np.random.randn(1)
if cv2.waitKey(30) >= 0:
break
cv2.destroyAllWindows()
# video with button (CV2)
button = widgets.Button(description='Live')
display.display(button)
def on_button_clicked(b):
for img in a:
cv2.imshow('Video',img)
cv2.waitKey(30)
display.clear_output(wait=True)
print np.random.randn(1)
button.on_click(on_button_clicked)
"""
Explanation: Video
http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
End of explanation
"""
button = widgets.ToggleButton(description='Live', value=False)
def on_click(change):
display.clear_output(wait=True)
print change['new']
button.observe(on_click, names='value')
display.display(button)
import time
b1 = widgets.Button(description='b1')
b2 = widgets.Button(description='b2')
def ctrlloop():
def b1_click(b):
for i in range(10):
print 'b1', i
time.sleep(0.5)
def b2_click(b):
for i in range(10):
print 'b2', i
# dl = widgets.jsdlink((button, 'value'), (vid, 'value'))
b1.on_click(b1_click)
b2.on_click(b2_click)
widgets.HBox([b1,b2])
play = widgets.Play(
interval=160,
value=50,
min=0,
max=100,
step=1,
description="Press play",
disabled=False
)
slider = widgets.IntSlider()
widgets.jslink((play, 'value'), (slider, 'value'))
widgets.HBox([play, slider])
f = open('temp.msg','wb')
f.write(str(1))
f.close()
"""
Explanation: GUI and BUTTONS
http://docs.opencv.org/2.4/modules/highgui/doc/user_interface.html
End of explanation
"""
# icons are from "font-awesome"
x_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-left')
x_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-right')
y_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon='arrow-up')
y_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-down')
xy_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
xy_cluster = widgets.VBox([ widgets.HBox([x_minus,x_plus]), widgets.HBox([y_minus, y_plus]) ])
z_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-up')
z_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-down')
z_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
z_cluster = widgets.VBox([ z_minus, z_plus])
widgets.HBox([xy_cluster, xy_slider, z_cluster, z_slider])
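# A small sketch (not in the original notebook) wiring the arrow buttons to
# simple handlers; the step size comes from the slider, and the print call is a
# placeholder for an actual (hypothetical) stage-motion command.
def make_arrow_handler(axis, sign):
    def handler(b):
        step = sign * xy_slider.children[1].value
        print('{} move: {}'.format(axis, step))
    return handler

x_minus.on_click(make_arrow_handler('x', -1))
x_plus.on_click(make_arrow_handler('x', +1))
y_minus.on_click(make_arrow_handler('y', -1))
y_plus.on_click(make_arrow_handler('y', +1))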
"""
Explanation: Arrows
End of explanation
"""
|
mattgiguere/doglodge
|
code/.ipynb_checkpoints/bf_qt_scraping-checkpoint.ipynb
|
mit
|
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *
from lxml import html
class Render(QWebPage):
def __init__(self, url):
self.app = QApplication(sys.argv)
QWebPage.__init__(self)
self.loadFinished.connect(self._loadFinished)
self.mainFrame().load(QUrl(url))
self.app.exec_()
def _loadFinished(self, result):
self.frame = self.mainFrame()
self.app.quit()
def update_url(self, url):
self.mainFrame().load(QUrl(url))
self.app.exec_()
url = 'http://www.bringfido.com/lodging/city/new_haven_ct_us'
# This does the magic. Loads everything
r = Render(url)
#result is a QString.
result = r.frame.toHtml()
# result
#QString should be converted to string before processed by lxml
formatted_result = str(result.toAscii())
#Next build lxml tree from formatted_result
tree = html.fromstring(formatted_result)
tree.text_content
#Now using correct Xpath we are fetching URL of archives
archive_links = tree.xpath('//*[@id="results_list"]/div')
print archive_links
url = 'http://pycoders.com/archive/'
r = Render(url)
result = r.frame.toHtml()
#QString should be converted to string before processed by lxml
formatted_result = str(result.toAscii())
tree = html.fromstring(formatted_result)
#Now using correct Xpath we are fetching URL of archives
archive_links = tree.xpath('//*[@class="campaign"]/a/@href')
# for lnk in archive_links:
# print(lnk)
"""
Explanation: bf_qt_scraping
This notebook describes how hotel data can be scraped using PyQT.
The items we want to extract are:
- the hotels for a given city
- links to each hotel page
- text hotel summary
- text hotel description
Once the links for each hotel are determined, I then want to extract the following items pertaining to each review:
- title
- author
- text
- rating
End of explanation
"""
url = 'http://www.bringfido.com/lodging/city/new_haven_ct_us'
r = Render(url)
result = r.frame.toHtml()
#QString should be converted to string before processed by lxml
formatted_result = str(result.toAscii())
tree = html.fromstring(formatted_result)
#Now using correct Xpath we are fetching URL of archives
archive_links = tree.xpath('//*[@id="results_list"]/div')
print(archive_links)
print('')
for lnk in archive_links:
print(lnk.xpath('div[2]/h1/a/text()')[0])
print(lnk.text_content())
print('*'*25)
"""
Explanation: Now the Hotels
End of explanation
"""
links = []
for lnk in archive_links:
print(lnk.xpath('div/h1/a/@href')[0])
links.append(lnk.xpath('div/h1/a/@href')[0])
print('*'*25)
lnk.xpath('//*/div/h1/a/@href')[0]
links
"""
Explanation: Now Get the Links
End of explanation
"""
url_base = 'http://www.bringfido.com'
r.update_url(url_base+links[0])
result = r.frame.toHtml()
#QString should be converted to string before processed by lxml
formatted_result = str(result.toAscii())
tree = html.fromstring(formatted_result)
hotel_description = tree.xpath('//*[@class="body"]/text()')
details = tree.xpath('//*[@class="address"]/text()')
address = details[0]
csczip = details[1]
phone = details[2]
#Now using correct Xpath we are fetching URL of archives
reviews = tree.xpath('//*[@class="review_container"]')
texts = []
titles = []
authors = []
ratings = []
print(reviews)
print('')
for rev in reviews:
titles.append(rev.xpath('div/div[1]/text()')[0])
authors.append(rev.xpath('div/div[2]/text()')[0])
texts.append(rev.xpath('div/div[3]/text()')[0])
ratings.append(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1])
print(rev.xpath('div[2]/img/@src')[0].split('/')[-1][0:1])
titles
authors
texts
ratings
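# A small sketch (not in the original notebook): collect the scraped review
# fields into a pandas DataFrame for later analysis.
import pandas as pd
reviews_df = pd.DataFrame({'title': titles,
                           'author': authors,
                           'text': texts,
                           'rating': ratings})
reviews_df['rating'] = reviews_df['rating'].astype(int)
reviews_df.head()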
"""
Explanation: Loading Reviews
Next, we want to step through each page, and scrape the reviews for each hotel.
End of explanation
"""
|
goerlitz/text-mining
|
python/REST-API Content Retriever.ipynb
|
apache-2.0
|
from pymongo import MongoClient
from urllib import urlopen
from jsonpath_rw import jsonpath, parse
from datetime import datetime
import json
import yaml
"""
Explanation: About
Retrieve JSON documents which are accessible via REST API and store them in mongodb.
Prerequesites
A running mongodb instance to store the JSON documents (see docker container).
Python libraries:
pymongo - python bindings for mongodb.
jsonpath_rw - path expressions for matching parts of a JSON document.
A configuration file (config.yaml) with settings for
mongodb instance and collection name
REST API url and parameters
JSON document format
End of explanation
"""
with open('config.yaml') as yaml_file:
cfg = yaml.load(yaml_file)
rest_api = cfg['rest-api']
json_cfg = cfg['json-path']
mongo_db = cfg['mongo']
api_list_items = rest_api['url'] + rest_api['get_list']
api_get_item = rest_api['url'] + rest_api['get_detail']
item_id_field = cfg['json-path']['item_id']
item_list_path = parse(cfg['json-path']['item_list'])
"""
Explanation: Prepare configuration settings
End of explanation
"""
client = MongoClient(mongo_db['url'])
db = client[mongo_db['database']][mongo_db['collection']]
print "%d entries in database." % db.find().count()
"""
Explanation: Prepare database connection
End of explanation
"""
# functions for REST API calls
def get_item_list(offset = 0, limit = 100, url = api_list_items):
request = urlopen(url % (offset, limit))
return json.loads(request.read())
def get_item(id, url = api_get_item):
request = urlopen(url % id)
return json.loads(request.read())
max_items = 10**6
limit=100
for offset in xrange(0, max_items, limit):
print ("%s - fetching items %s - %s" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), offset, offset+limit))
# download item list
result = get_item_list(offset=offset, limit=limit)
item_list = [ item.value for item in item_list_path.find(result) ]
# stop if result list is empty
if len(item_list) == 0:
print "no more results returned"
break
# extract IDs and compare with items already in database
item_ids = [ item[item_id_field] for item in item_list ]
known_ids = [ item[item_id_field] for item in db.find( {item_id_field: { "$in": item_ids }} ) ]
new_ids = [ x for x in item_ids if x not in known_ids ]
print "-> got %d ids (%d known, %d new)" % (len(item_ids), len(known_ids), len(new_ids))
# fetch new items from REST API
items = []
for id in new_ids:
item = get_item(id)
items.append(item)
# insert new items in database
if len(items) != 0:
result = db.insert_many(items)
"""
Explanation: Fetching documents via REST API
End of explanation
"""
|
ToqueWillot/M2DAC
|
FDMS/TME6/TME6_Reco.ipynb
|
gpl-2.0
|
from random import random
import math
import numpy as np
import copy
"""
Explanation: TME4 FDMS Collaborative Filtering
Florian Toqué & Paul Willot
End of explanation
"""
def loadMovieLens(path='./data/movielens'):
#Get movie titles
movies={}
rev_movies={}
for idx,line in enumerate(open(path+'/u.item')):
idx,title=line.split('|')[0:2]
movies[idx]=title
rev_movies[title]=idx
# Load data
prefs={}
for line in open(path+'/u.data'):
(user,movieid,rating,ts)=line.split('\t')
prefs.setdefault(user,{})
prefs[user][movies[movieid]]=float(rating)
return prefs,rev_movies
data,movies = loadMovieLens("data/ml-100k")
"""
Explanation: Loading the data
End of explanation
"""
data['3']
"""
Explanation: Content example
End of explanation
"""
def getRawArray(data):
d = []
for u in data.keys():
for i in data[u].keys():
d.append([u,i,data[u][i]])
return np.array(d)
# splitting while avoiding to reduce the dataset too much
def split_train_test(data,percent_test):
test={}
train={}
movie={}
for u in data.keys():
test.setdefault(u,{})
train.setdefault(u,{})
for movie in data[u]:
#print(data[u][movie])
if (random()<percent_test):
test[u][movie]=data[u][movie]
else:
train[u][movie]=data[u][movie]
return train, test
def split_train_test_by_movies(data,percent_test):
test={}
train={}
movie={}
for u in data.keys():
for movie in data[u]:
if (random()<percent_test):
try:
test[movie][u]=data[u][movie]
except KeyError:
test.setdefault(movie,{})
test[movie][u]=data[u][movie]
else:
try:
train[movie][u]=data[u][movie]
except KeyError:
train.setdefault(movie,{})
train[movie][u]=data[u][movie]
return train, test
percent_test=0.2
train,test=split_train_test(data,percent_test)
"""
Explanation: Splitting data between train/test
We avoid letting data that is unseen in the train set end up in the test set.
We also try to minimize the dataset reduction by splitting on each user.
End of explanation
"""
percent_test=0.2
m_train,m_test=split_train_test_by_movies(data,percent_test)
"""
Explanation: Split by movie, used for convenience by the average-by-movie baseline.
End of explanation
"""
def deleteUnseenInTest(train,test):
for k in test.keys():
try:
train[k]
except KeyError:
test.pop(k,None)
deleteUnseenInTest(train,test)
deleteUnseenInTest(m_train,m_test)
"""
Explanation: cleaning
End of explanation
"""
evalArrayAll = getRawArray(data)
evalArrayTest = getRawArray(test)
evalArrayTest[:10,:10]
"""
Explanation: Matrix used for fast evaluation
End of explanation
"""
class baselineMeanUser:
def __init__(self):
self.users={}
def fit(self,train):
for user in train.keys():
note=0.0
for movie in train[user].keys():
note+=train[user][movie]
note=note/len(train[user])
self.users[user]=note
def predict(self,users):
return [self.users[u] for u in users]
baseline_mu= baselineMeanUser()
baseline_mu.fit(train)
pred = baseline_mu.predict(evalArrayTest[:,0])
print("Mean Error %0.6f" %(
(np.array(pred) - np.array(evalArrayTest[:,2], float)) ** 2).mean())
class baselineMeanMovie:
def __init__(self):
self.movies={}
def fit(self,train):
for movie in train.keys():
note=0.0
for user in train[movie].keys():
note+=train[movie][user]
note=note/len(train[movie])
self.movies[movie]=note
def predict(self,movies):
res=[]
for m in movies:
try:
res.append(self.movies[m])
except:
res.append(3)
return res
baseline_mm= baselineMeanMovie()
baseline_mm.fit(m_train)
pred = baseline_mm.predict(evalArrayTest[:,1])
print("Mean Error %0.6f" %(
(np.array(pred) - np.array(evalArrayTest[:,2], float)) ** 2).mean())
"""
Explanation: Baseline: mean by user
End of explanation
"""
rawMatrix = np.zeros((len(data.keys()),1682))
for u in data:
for m in data[u]:
rawMatrix[int(u)-1][int(movies[m])-1] = data[u][m]
print(np.shape(rawMatrix))
rawMatrix[:10,:10]
"""
Explanation: Raw matrix used for convenience and clarity.
Structures like scipy sparse matrices or Python dictionaries may be used for speedup.
Complete dataset
End of explanation
"""
rawMatrixTrain = np.zeros((len(data.keys()),1682))
for u in train:
for m in train[u]:
rawMatrixTrain[int(u)-1][int(movies[m])-1] = train[u][m]
rawMatrixTest = np.zeros((len(data.keys()),1682))
for u in test:
for m in test[u]:
rawMatrixTest[int(u)-1][int(movies[m])-1] = test[u][m]
"""
Explanation: Train and test dataset
End of explanation
"""
#from scipy import linalg
def nmf(X, latent_features, max_iter=100, eps = 1e-5,printevery=100):
print "NMF with %d latent features, %d iterations."%(latent_features, max_iter)
# mask used to ignore null element (coded by zero)
mask = np.sign(X)
# randomly initialized matrix
rows, columns = X.shape
A = np.random.rand(rows, latent_features)
Y = np.random.rand(latent_features, columns)
# Not used as I couldn't find significant improvments
#Y = linalg.lstsq(A, X)[0] # initializing that way as recommanded in a blog post
#Y = np.maximum(Y, eps) # avoiding too low values
masked_X = mask * X
masktest = np.sign(rawMatrixTest) # used for prints
masktrain = np.sign(rawMatrixTrain) # used for prints
for i in range(1, max_iter + 1):
top = np.dot(masked_X, Y.T)
bottom = (np.dot((mask * np.dot(A, Y)), Y.T)) + eps
A *= top / bottom
top = np.dot(A.T, masked_X)
bottom = np.dot(A.T, mask * np.dot(A, Y)) + eps
Y *= top / bottom
# evaluation
if i % printevery == 0 or i == 1 or i == max_iter:
X_est = np.dot(A, Y)
q = masktest*X_est - rawMatrixTest
q_train = masktrain*X_est - rawMatrixTrain
print "Iteration %d, Err %.05f, Err train %.05f"%( i, (q*q).sum()/ masktest.sum(), (q_train*q_train).sum()/ masktest.sum() )
return A, Y
%%time
A,Y = nmf(rawMatrixTrain,100,eps = 1e-5,max_iter=5,printevery=1)
resMatrix = A.dot(Y)
"""
Explanation: Non-negative Matrix Factorization
Fast implementation using numpy's matrix processing.
End of explanation
"""
%%time
A,Y = nmf(rawMatrixTrain,100,eps = 1e-5,max_iter=500,printevery=100)
resMatrix = A.dot(Y)
"""
Explanation: We see that it quickly gets better than the baseline.
However, we see below that it overfits after that:
End of explanation
"""
%%time
A,Y = nmf(rawMatrixTrain,1,eps = 1e-5,max_iter=100,printevery=20)
resMatrix = A.dot(Y)
"""
Explanation: This is due to the high sparsity of the matrix.
We can of course reduce the size of the feature matrices to avoid overfitting, but that will limit further improvements.
End of explanation
"""
## This class is used to make predictions
class evalMF:
def __init__(self,resMatrix,dicU,dicI):
self.resMatrix=resMatrix
self.dicU = dicU
self.dicI = dicI
def fit(self):
pass
def predict(self,user,movie):
return self.resMatrix[int(user)-1][int(self.dicI[movie])-1]
mf = evalMF(resMatrix,data,movies)
# np.array([ (float(ra[2]) - mf.predict(ra[0],ra[1]))**2 for ra in evalArrayTest]).mean()
# faster evaluation
masqueTest=np.sign(rawMatrixTest)
q = masqueTest*resMatrix - rawMatrixTest
(q*q).sum()/ masqueTest.sum()
print data["1"]["Akira (1988)"]
print mf.predict("1","Akira (1988)")
print data["1"]["I.Q. (1994)"]
print mf.predict("1","I.Q. (1994)")
"""
Explanation: Despite good results in a few seconds on this dataset, this can only get us so far.
We therefore have to add regularization to the cost function.
Evaluation
End of explanation
"""
summ=0
for i in data["1"]:
summ+=(float(data["1"][i]) - mf.predict("1",i))**2
summ/len(data["1"])
summ=0
for i in data["3"]:
summ+=(float(data["3"][i]) - mf.predict("3",i))**2
summ/len(data["3"])
"""
Explanation: We usually see an important difference between users, so we need to take the bias into account.
End of explanation
"""
#self.lamb*np.linalg.norm(self.theta)
#from scipy import linalg
def nmf(X, latent_features, max_iter=100, eps = 1e-5, printevery=100):
print "NMF with %d latent features, %d iterations."%(latent_features, max_iter)
#lamb = 0.2
## User and Item bais
#X = copy.deepcopy(rawMatrix)
#with np.errstate(all='ignore'):
#avg_m = X.sum(0)/(X != 0).sum(0)
#avg_u = X.sum(1)/(X != 0).sum(1)
#diff_m = avg_m - avg_m.mean()
#diff_u = avg_u - avg_u.mean()
#print(avg_u.mean())
#X = X - diff_m
#for idxi,i in enumerate(X):
# for idxj,j in enumerate(i):
# if X[idxi,idxj]!=0:
# X[idxi,idxj]+=diff_u[idxi]
mask = np.sign(X)
rows, columns = X.shape
A = np.random.rand(rows, latent_features)
Y = np.random.rand(latent_features, columns)
# Not used as I couldn't find significant improvments
#Y = linalg.lstsq(A, X)[0] # initializing that way as recommanded in a blog post
#Y = np.maximum(Y, eps) # avoiding too low values
masked_X = mask * X
masktest = np.sign(rawMatrixTest) # used for prints
masktrain = np.sign(rawMatrixTrain) # used for prints
prev_A = A
prev_Y = Y
#diff_u = (avg_u - avg_u.mean().T).T
#(np.array([1,5])-mat.T).T
for i in range(1, max_iter + 1):
top = np.dot(masked_X, Y.T)
esti = np.dot((mask * np.dot(A, Y)), Y.T)
#esti = esti - diff_u
bottom = esti + eps
#print("val",np.shape(top/bottom))
A *= top / bottom
top = np.dot(A.T, masked_X)
esti = np.dot(A.T, mask * np.dot(A, Y))
#esti = esti - diff_m
bottom = esti + eps
#print("lav",np.shape(top/bottom))
tb = top / bottom
#print(np.linalg.norm(tb))
no = np.linalg.norm(tb)
#Y *= tb
Y *= (0.9 * tb) + ( 0.1 * ( tb + (1/no) ) )
"""
## Regularization
if i % 10 == 0:
diff = np.abs(Y - prev_Y)
diff = diff - 0.001
Y = np.sign(diff)*Y
prev_Y = Y
"""
# evaluation
if i % 10 == 0 or i == 1 or i == max_iter:
X_est = np.dot(A, Y)
q = masktest*X_est - rawMatrixTest
q_train = masktrain*X_est - rawMatrixTrain
print(np.linalg.norm(tb))
print "Iteration %d, Err %.05f, Err train %.05f"%( i, (q*q).sum()/ masktest.sum(), (q_train*q_train).sum()/ masktest.sum() )
return A, Y
"""
Explanation: Various attempts to incorporate the bias and the L1 regularization can be found below.
However, we have not been very successful with them yet...
A simpler yet slower model can be found at the bottom of the page, in which the bias and L1 regularization can be added easily.
End of explanation
"""
%%time
X = copy.deepcopy(rawMatrixTrain)
A,Y = nmf(X,1,eps = 1e-5,max_iter=100,printevery=10)
resMatrix = A.dot(Y)
"""
Explanation: This is quite unstable
End of explanation
"""
with np.errstate(all='ignore'):
avg_m = rawMatrix.sum(0)/(rawMatrix != 0).sum(0)
avg_u = rawMatrix.sum(1)/(rawMatrix != 0).sum(1)
"""
Explanation: /!\ 18 movies have no ratings at all
so we get a divide by zero warning. Ignored with:
End of explanation
"""
masqueTest=np.sign(rawMatrixTest)
q = masqueTest*resMatrix - rawMatrixTest
(q*q).sum()/ masqueTest.sum()
mf = evalMF(resMatrix,data,movies)
print data["1"]["Akira (1988)"]
print mf.predict("1","Akira (1988)")
print data["1"]["All Dogs Go to Heaven 2 (1996)"]
print mf.predict("1","All Dogs Go to Heaven 2 (1996)")
len(rawMatrixTest)
t = []
c = 10
for idxi,i in enumerate(rawMatrixTest):
for idxj,j in enumerate(i):
if rawMatrixTest[idxi][idxj] != 0:
t.append( (resMatrix[idxi][idxj] - float(rawMatrixTest[idxi][idxj]))**2 )
if c>0:
print(rawMatrixTest[idxi][idxj],resMatrix[idxi][idxj])
c-=1
np.array(t).mean()
"""
Explanation:
End of explanation
"""
R = rawMatrixTrain
def matrix_factorization(R, K, steps=100, eps=0.0001, beta=0.02, decay=0.95):
N,M = np.shape(R)
P = np.random.rand(N,K)
#P = np.maximum(P, eps)
#Q = np.random.rand(M,K).T
Q = linalg.lstsq(P, R)[0]
Q = np.maximum(Q, eps)
#masked_X = mask * X
#X_est_prev = dot(A, Y)
#mask = np.sign(R)
#masked_R = mask * R
masktest = np.sign(rawMatrixTest)
masktrain = np.sign(rawMatrixTrain)
for step in xrange(1,steps+1):
#"""
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
eij = R[i][j] - np.dot(P[i,:],Q[:,j])
P[i] = P[i] + eps * (2 * eij * Q.T[j] - beta * P[i])
#Q[i] = P[i] + eps * (2 * eij * Q.T[j] - beta * P[i])
Q.T[j] = Q.T[j] + eps * (2 * eij * P[i] - beta * Q.T[j])
#for k in xrange(K):
# P[i][k] = P[i][k] + eps * (2 * eij * Q[k][j] - beta * P[i][k])
#Q[k][j] = Q[k][j] + eps * (2 * eij * P[i][k] - beta * Q[k][j])
if step%5:
eps=eps*decay
if step % 10 == 0 or step == 1 or step == steps:
X_est = dot(P, Q)
q = masktest*X_est - rawMatrixTest
q_train = masktrain*X_est - rawMatrixTrain
print "Iteration %d, Err %.05f, Err on train %.05f"%( step, (q*q).sum()/ masktest.sum(), (q_train*q_train).sum()/ masktest.sum() )
return P, Q.T
%%time
K = 10
nP, nQ = matrix_factorization(R, K, steps=100,eps=1e-5)
nR = np.dot(nP, nQ.T)
((nR-R)**2).sum()/np.sign(R).sum()
"""
Explanation:
End of explanation
"""
def matrix_factorization(R, K, steps=100, eps=0.0001, beta=0.02, decay=0.95):
N,M = np.shape(R)
P = np.random.rand(N,K)
Q = linalg.lstsq(P, R)[0]
Q = np.maximum(Q, eps)
masktest = np.sign(rawMatrixTest)
masktrain = np.sign(rawMatrixTrain)
for step in xrange(1,steps+1):
for i in xrange(len(R)):
for j in xrange(len(R[i])):
if R[i][j] > 0:
eij = R[i][j] - np.dot(P[i,:],Q[:,j])
P[i] = P[i] + eps * (2 * eij * Q.T[j] - beta * P[i])
Q.T[j] = Q.T[j] + eps * (2 * eij * P[i] - beta * Q.T[j])
if step%5:
eps=eps*decay
if step % 10 == 0 or step == 1 or step == steps:
X_est = dot(P, Q)
q = masktest*X_est - rawMatrixTest
q_train = masktrain*X_est - rawMatrixTrain
print "Iteration %d, Err %.05f, Err on train %.05f"%( step, (q*q).sum()/ masktest.sum(), (q_train*q_train).sum()/ masktest.sum() )
return P, Q.T
"""
Explanation:
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/computer_vision_fun/solutions/classifying_images_using_dropout_and_batchnorm_layer.ipynb
|
apache-2.0
|
import tensorflow as tf
print(tf.version.VERSION)
"""
Explanation: Classifying Images using Dropout and Batchnorm Layer
Introduction
In this notebook, you learn how to build a neural network to classify the tf-flowers dataset using dropout and batchnorm layers.
Learning objectives
Define Helper Functions.
Apply dropout and batchnorm layer.
Each learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.
End of explanation
"""
# Helper functions
def training_plot(metrics, history):
f, ax = plt.subplots(1, len(metrics), figsize=(5*len(metrics), 5))
for idx, metric in enumerate(metrics):
ax[idx].plot(history.history[metric], ls='dashed')
ax[idx].set_xlabel("Epochs")
ax[idx].set_ylabel(metric)
ax[idx].plot(history.history['val_' + metric]);
ax[idx].legend([metric, 'val_' + metric])
# Call model.predict() on a few images in the evaluation dataset
def plot_predictions(filename):
f, ax = plt.subplots(3, 5, figsize=(25,15))
dataset = (tf.data.TextLineDataset(filename).
map(decode_csv))
for idx, (img, label) in enumerate(dataset.take(15)):
ax[idx//5, idx%5].imshow((img.numpy()));
batch_image = tf.reshape(img, [1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
batch_pred = model.predict(batch_image)
pred = batch_pred[0]
label = CLASS_NAMES[label.numpy()]
pred_label_index = tf.math.argmax(pred).numpy()
pred_label = CLASS_NAMES[pred_label_index]
prob = pred[pred_label_index]
ax[idx//5, idx%5].set_title('{}: {} ({:.4f})'.format(label, pred_label, prob))
def show_trained_weights(model):
# CLASS_NAMES is ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']
LAYER = 1 # Layer 0 flattens the image, layer=1 is the first dense layer
WEIGHT_TYPE = 0 # 0 for weight, 1 for bias
f, ax = plt.subplots(1, 5, figsize=(15,15))
for flower in range(len(CLASS_NAMES)):
weights = model.layers[LAYER].get_weights()[WEIGHT_TYPE][:, flower]
min_wt = tf.math.reduce_min(weights).numpy()
max_wt = tf.math.reduce_max(weights).numpy()
flower_name = CLASS_NAMES[flower]
print("Scaling weights for {} in {} to {}".format(
flower_name, min_wt, max_wt))
weights = (weights - min_wt)/(max_wt - min_wt)
ax[flower].imshow(weights.reshape(IMG_HEIGHT, IMG_WIDTH, 3));
ax[flower].set_title(flower_name);
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
def read_and_decode(filename, reshape_dims):
# Read the file
img = tf.io.read_file(filename)
# Convert the compressed string to a 3D uint8 tensor.
img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS)
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
img = tf.image.convert_image_dtype(img, tf.float32)
# TODO 1: Resize the image to the desired size.
return tf.image.resize(img, reshape_dims)
CLASS_NAMES = [item.numpy().decode("utf-8") for item in
tf.strings.regex_replace(
tf.io.gfile.glob("gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/*"),
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/", "")]
CLASS_NAMES = [item for item in CLASS_NAMES if item.find(".") == -1]
print("These are the available classes:", CLASS_NAMES)
# the label is the index into CLASS_NAMES array
def decode_csv(csv_row):
record_defaults = ["path", "flower"]
filename, label_string = tf.io.decode_csv(csv_row, record_defaults)
img = read_and_decode(filename, [IMG_HEIGHT, IMG_WIDTH])
label = tf.argmax(tf.math.equal(CLASS_NAMES, label_string))
return img, label
"""
Explanation: Define Helper Functions
Reading and Preprocessing image data
End of explanation
"""
def train_and_evaluate(batch_size = 32,
lrate = 0.0001,
l1 = 0,
l2 = 0.001,
dropout_prob = 0.4,
num_hidden = [64, 16]):
regularizer = tf.keras.regularizers.l1_l2(l1, l2)
train_dataset = (tf.data.TextLineDataset(
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/train_set.csv").
map(decode_csv)).batch(batch_size)
eval_dataset = (tf.data.TextLineDataset(
"gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv").
map(decode_csv)).batch(32) # this doesn't matter
# NN with multiple hidden layers
layers = [tf.keras.layers.Flatten(
input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
name='input_pixels')]
for hno, nodes in enumerate(num_hidden):
layers.extend([
tf.keras.layers.Dense(nodes,
kernel_regularizer=regularizer,
name='hidden_dense_{}'.format(hno)),
tf.keras.layers.BatchNormalization(scale=False, # ReLU
center=False, # have bias in Dense
name='batchnorm_dense_{}'.format(hno)),
#move activation to come after batchnorm
tf.keras.layers.Activation('relu', name='relu_dense_{}'.format(hno)),
# TODO 2: Apply Dropout to the input
tf.keras.layers.Dropout(rate=dropout_prob,
name='dropout_dense_{}'.format(hno)),
])
layers.append(
tf.keras.layers.Dense(len(CLASS_NAMES),
kernel_regularizer=regularizer,
activation='softmax',
name='flower_prob')
)
model = tf.keras.Sequential(layers, name='flower_classification')
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lrate),
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False),
metrics=['accuracy'])
print(model.summary())
history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10)
training_plot(['loss', 'accuracy'], history)
return model
model = train_and_evaluate(dropout_prob=0.4)
"""
Explanation: Apply dropout and batchnorm layer
A deep neural network (DNN) is a neural network with more than one hidden layer. Each time you add a layer, the number of trainable parameters increases, so you need a larger dataset. You still have only about 3,700 flower images, which might cause overfitting.
Dropout is a regularization technique used to prevent overfitting. Batch normalization is a layer that lets every layer of the network learn more independently; it is added to the model to standardize the inputs or outputs of a layer. Add a dropout and a batchnorm layer after each of the hidden layers.
Dropout
Dropout is one of the oldest regularization techniques in deep learning. At each training iteration, it drops random neurons from the network with a probability p (typically 25% to 50%). In practice, neuron outputs are set to 0. The net result is that these neurons will not participate in the loss computation this time around and they will not get weight updates. Different neurons will be dropped at each training iteration.
Batch normalization
Our input pixel values are in the range [0,1], which is compatible with the dynamic range of the typical activation functions and optimizers. However, once we add a hidden layer, the resulting output values no longer lie in the dynamic range of the activation function for subsequent layers. When that happens, a neuron can land in the flat part of its activation function where the output barely changes: moving a small amount in either direction makes no difference, the gradient is (close to) zero, and there is no way for the network to escape from this dead zone. To fix this, batch norm normalizes neuron outputs across a training batch of data, i.e. it subtracts the average and divides by the standard deviation. On top of that, the network learns how much centering and re-scaling to apply at each neuron. In Keras, you can selectively use one or the other:
tf.keras.layers.BatchNormalization(scale=False, center=True)
When using batch normalization, remember that:
1. Batch normalization goes between the output of a layer and its activation function. So, rather than set activation='relu' in the Dense layer’s constructor, we’d omit the activation function, and then add a separate Activation layer.
2. If you use center=True in batch norm, you do not need biases in your layer. The batch norm offset plays the role of a bias.
3. If you use an activation function that is scale-invariant (i.e. does not change shape if you zoom in on it) then you can set scale=False. ReLU is scale-invariant; sigmoid is not.
End of explanation
"""
|
danielhomola/boruta_py
|
boruta/examples/Madalon_Data_Set.ipynb
|
bsd-3-clause
|
# Installation
#!pip install boruta
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from boruta import BorutaPy
def load_data():
# URLS for dataset via UCI
train_data_url='https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.data'
train_label_url='https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.labels'
X_data = pd.read_csv(train_data_url, sep=" ", header=None)
y_data = pd.read_csv(train_label_url, sep=" ", header=None)
data = X_data.loc[:, :499]
data['target'] = y_data[0]
return data
data = load_data()
data.head()
y = data.pop('target')
X = data.copy().values
"""
Explanation: Using Boruta on the Madelon Data Set
Author: Mike Bernico
This example demonstrates using Boruta to find all relevant features in the Madelon dataset, an artificial dataset used in NIPS 2003 and cited in the Boruta paper.
This dataset has 2000 observations and 500 features. We will use Boruta to identify the features that are relevant to the classification task.
End of explanation
"""
rf = RandomForestClassifier(n_jobs=-1, class_weight=None, max_depth=7, random_state=0)
# Define Boruta feature selection method
feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=0)
"""
Explanation: Boruta conforms to the scikit-learn API and can be used in a Pipeline as well as on its own. Here we will demonstrate standalone operation; a commented-out Pipeline sketch follows this cell for reference.
First we will instantiate an estimator that Boruta will use. Then we will instantiate a BorutaPy object.
End of explanation
"""
feat_selector.fit(X, y)
"""
Explanation: Once built, we can use this object to identify the relevant features in our dataset.
End of explanation
"""
# Check selected features
print(feat_selector.support_)
# Select the chosen features from our dataframe.
selected = X[:, feat_selector.support_]
print ("")
print ("Selected Feature Matrix Shape")
print (selected.shape)
"""
Explanation: Boruta has confirmed only a few features as useful. When our run ended, Boruta was undecided on 2 features.
We can interrogate .support_ to understand which features were selected. .support_ returns an array of booleans that we can use to slice our feature matrix and keep only the relevant columns. Of course, .transform can also be used, as expected from the scikit-learn API (see the sketch after this cell).
End of explanation
"""
feat_selector.ranking_
"""
Explanation: We can also interrogate the ranking of the unselected features with .ranking_
End of explanation
"""
|
dietmarw/EK5312_ElectricalMachines
|
Chapman/Ch9-Problem_9-02.ipynb
|
unlicense
|
%pylab notebook
%precision %.4g
"""
Explanation: Exercises Electric Machinery Fundamentals
Chapter 9
Problem 9-2
End of explanation
"""
V = 120 # [V]
p = 4
R1 = 2.0 # [Ohm]
R2 = 2.8 # [Ohm]
X1 = 2.56 # [Ohm]
X2 = 2.56 # [Ohm]
Xm = 60.5 # [Ohm]
s = 0.025
Prot = 51 # [W]
"""
Explanation: Description
Repeat Problem 9-1 for a rotor slip of 0.025.
End of explanation
"""
Zf = ((R2/s + X2*1j)*(Xm*1j)) / (R2/s + X2*1j + Xm*1j)
Zf
"""
Explanation: SOLUTION
The impedances $Z_F$ and $Z_B$ are:
$$Z_F = \frac{(R_2/s + jX_2)(jX_M)}{R_2/s + jX_2 + jX_M}$$
End of explanation
"""
Zb = ((R2/(2-s) + X2*1j)*(Xm*1j)) / (R2/(2-s) + X2*1j + Xm*1j)
Zb
"""
Explanation: $$Z_B = \frac{(R_2/(2-s) + jX_2)(jX_M)}{R_2/(2-s) + jX_2 + jX_M}$$
End of explanation
"""
I1 = V / (R1 +X1*1j + 0.5*Zf + 0.5*Zb)
I1_angle = arctan(I1.imag/I1.real)
print('I1 = {:.3f} V ∠{:.1f}°'.format(abs(I1), I1_angle/pi*180))
Pin = V*abs(I1)*cos(I1_angle)
print('''
Pin = {:.1f} W
============='''.format(Pin))
"""
Explanation: (a)
The input current is:
$$\vec{I}_1 = \frac{\vec{V}}{R_1 + jX_1 + 0.5Z_F + 0.5Z_B}$$
End of explanation
"""
Pag_f = abs(I1)**2*0.5*Zf.real
Pag_f
Pag_b = abs(I1)**2*0.5*Zb.real
Pag_b
Pag = Pag_f - Pag_b
print('''
Pag = {:.1f} W
============='''.format(Pag))
"""
Explanation: (b)
The air-gap power is:
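$$P_\text{AG,F} = I_1^2(0.5R_F) \qquad P_\text{AG,B} = I_1^2(0.5R_B) \qquad P_\text{AG} = P_\text{AG,F} - P_\text{AG,B}$$
where $R_F$ and $R_B$ are the real parts of $Z_F$ and $Z_B$, matching the code cell above.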
End of explanation
"""
Pconv_f = (1-s)*Pag_f
Pconv_f
Pconv_b = (1-s)*Pag_b
Pconv_b
Pconv = Pconv_f - Pconv_b
print('''
Pconv = {:.1f} W
==============='''.format(Pconv))
"""
Explanation: (c)
The power converted from electrical to mechanical form is:
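$$P_\text{conv,F} = (1-s)P_\text{AG,F} \qquad P_\text{conv,B} = (1-s)P_\text{AG,B} \qquad P_\text{conv} = P_\text{conv,F} - P_\text{conv,B}$$
as implemented in the code cell above.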
End of explanation
"""
Pout = Pconv - Prot
print('''
Pout = {:.1f} W
=============='''.format(Pout))
"""
Explanation: (d)
The output power is:
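$$P_\text{out} = P_\text{conv} - P_\text{rot}$$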
End of explanation
"""
n_sync = 1800.0 # [r/min]
w_sync = n_sync * (2.0*pi/1.0) * (1.0/60.0)
tau_ind = Pag / w_sync
print('''
τ_ind = {:.3f} Nm
================'''.format(tau_ind))
"""
Explanation: (e)
The induced torque is
$$\tau_\text{ind} = \frac{P_\text{AG}}{\omega_\text{sync}}$$
End of explanation
"""
w_m = (1-s)*w_sync
tau_load = Pout / w_m
print('''
τ_load = {:.3f} Nm
================='''.format(tau_load))
"""
Explanation: (f)
The load torque is:
$$\tau_\text{load} = \frac{P_\text{out}}{\omega_m}$$
End of explanation
"""
eta = Pout/Pin
print('''
η = {:.1f} %
=========='''.format(eta*100))
"""
Explanation: (g)
The overall efficiency is:
$$\eta = \frac{P_\text{out}}{P_\text{in}} \cdot 100\%$$
End of explanation
"""
PF = cos(I1_angle)
print('''
PF = {:.3f} lagging
=================='''.format(PF))
"""
Explanation: (h)
The stator power factor is:
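$$PF = \cos\theta$$
where $\theta$ is the phase angle of the stator current $\vec{I}_1$, as computed in the code cell above.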
End of explanation
"""
|
NREL/bifacial_radiance
|
docs/tutorials/16 - AgriPV - 3-up and 4-up collector optimization.ipynb
|
bsd-3-clause
|
import os
from pathlib import Path
testfolder = Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_16'
if not os.path.exists(testfolder):
os.makedirs(testfolder)
print ("Your simulation will be stored in %s" % testfolder)
import bifacial_radiance
import numpy as np
rad_obj = bifacial_radiance.RadianceObj('tutorial_16', str(testfolder))
"""
Explanation: 16 - AgriPV - 3-up and 4-up collector optimization
This journal explores how varying collector widths and xgaps affect the irradiance on the ground underneath the array as well as on the rear of the modules for bifacial AgriPV. The optimization varies the numpanels combinations (3-up and 4-up collectors) together with the spacing along the row (xgap). The actual raytracing is not performed in this Jupyter journal but on the HPC; the geometry, however, is the same as presented here.
The steps on this journal:
<ol>
<li> <a href='#step1'> Making Collectors for each number panel and xgap case </a></li>
<li> <a href='#step2'> Build the Scene so it can be viewed with rvu </a></li>
</ol>
An area of 40 m x 20 m is sampled on the HPC and is highlighted in the visualizations below with an appended terrain of 'litesoil'. The image below shows the two extremes of the optimized variables and the raytrace results, including the worst-case shading experienced under the array (100 - min_irradiance * 100 / GHI).

End of explanation
"""
x = 2
y = 1
ygap = 0.1524 # m = 6 in
zgap = 0.002 # m, very little gap to the torque tube.
tubeParams = {'diameter':0.15,
'tubetype':'square',
'material':'Metal_Grey',
'axisofrotation':True,
'visible': True}
ft2m = 0.3048
xgaps = [3, 4, 6, 9, 12, 15, 18, 21]
numpanelss = [3, 4]
# Loops
for ii in range(0, len(numpanelss)):
numpanels = numpanelss[ii]
for jj in range(0, len(xgaps)):
xgap = xgaps[jj]*ft2m
moduletype = 'test-module_'+str(numpanels)+'up_'+str(round(xgap,1))+'xgap'
rad_obj.makeModule(moduletype,
x=x, y=y,
xgap=xgap, zgap=zgap, ygap = ygap, numpanels=numpanels,
tubeParams=tubeParams)
"""
Explanation: <a id='step1'></a>
1. Making Collectors for each number panel and xgap case
End of explanation
"""
xgaps = np.round(np.array([3, 4, 6, 9, 12, 15, 18, 21]) * ft2m,1)
numpanelss = [3, 4]
sensorsxs = np.array(list(range(0, 201)))
# Select CASE:
xgap = np.round(xgaps[-1],1)
numpanels = 4
# All the rest
ft2m = 0.3048
hub_height = 8.0 * ft2m
y = 1
pitch = 0.001 # If I recall, it doesn't like when pitch is 0 even if it's a single row, but any value works here.
ygap = 0.15
tilt = 18
sim_name = ('Coffee_'+str(numpanels)+'up_'+
str(round(xgap,1))+'_xgap')
albedo = 0.35 # Grass value from Torres Molina, "Measuring UHI in Puerto Rico" 18th LACCEI
# International Multi-Conference for Engineering, Education, and Technology
azimuth = 180
if numpanels == 3:
nMods = 9
if numpanels == 4:
nMods = 7
nRows = 1
moduletype = 'test-module_'+str(numpanels)+'up_'+str(round(xgap,1))+'xgap'
rad_obj.setGround(albedo)
lat = 18.202142
lon = -66.759187
metfile = rad_obj.getEPW(lat,lon)
rad_obj.readWeatherFile(metfile)
sceneDict = {'tilt':tilt,'pitch':pitch,'hub_height':hub_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows}
scene = rad_obj.makeScene(module=moduletype,sceneDict=sceneDict, radname = sim_name)
rad_obj.gendaylit(4020)
octfile = rad_obj.makeOct(filelist = rad_obj.getfilelist(), octname = rad_obj.basename)
name='SampleArea'
text='! genbox litesoil cuteBox 40 20 0.01 | xform -t -20 -10 0.01'
customObject =rad_obj.makeCustomObject(name,text)
rad_obj.appendtoScene(scene.radfiles, customObject, '!xform -rz 0')
octfile = rad_obj.makeOct(rad_obj.getfilelist())
"""
Explanation: <a id='step2'></a>
2. Build the Scene so it can be viewed with rvu
End of explanation
"""
## Uncomment one of the ! lines below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 makemod.oct
#!rvu -vf views\front.vp -e .0265652 -vp 5 0 70 -vd 0 0.0001 -1 makemod.oct
"""
Explanation: To view the generated Scene, you can navigate to the testfolder on a terminal and use:
<b>front view:</b>
rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 makemod.oct
<b> top view: </b>
rvu -vf views\front.vp -e .0265652 -vp 5 0 70 -vd 0 0.0001 -1 makemod.oct
Or run it directly from Jupyter by removing the comment from the following cell:
End of explanation
"""
|
fastai/fastai
|
dev_nbs/course/lesson6-rossmann.ipynb
|
apache-2.0
|
path = Config().data/'rossmann'
train_df = pd.read_pickle(path/'train_clean')
train_df.head().T
n = len(train_df); n
"""
Explanation: Rossmann
Data preparation
To create the feature-engineered train_clean and test_clean from the Kaggle competition data, run rossman_data_clean.ipynb. One important step that deals with time series is this:
python
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
End of explanation
"""
idx = np.random.permutation(range(n))[:2000]
idx.sort()
small_df = train_df.iloc[idx]
small_cont_vars = ['CompetitionDistance', 'Mean_Humidity']
small_cat_vars = ['Store', 'DayOfWeek', 'PromoInterval']
small_df = small_df[small_cat_vars + small_cont_vars + ['Sales']].reset_index(drop=True)
small_df.head()
small_df.iloc[1000:].head()
splits = [list(range(1000)),list(range(1000,2000))]
to = TabularPandas(small_df.copy(), Categorify, cat_names=small_cat_vars, cont_names=small_cont_vars, splits=splits)
to.train.items.head()
to.valid.items.head()
to.classes['DayOfWeek']
splits = [list(range(1000)),list(range(1000,2000))]
to = TabularPandas(small_df.copy(), FillMissing, cat_names=small_cat_vars, cont_names=small_cont_vars, splits=splits)
to.train.items[to.train.items['CompetitionDistance_na'] == True]
"""
Explanation: Experimenting with a sample
End of explanation
"""
train_df = pd.read_pickle(path/'train_clean')
test_df = pd.read_pickle(path/'test_clean')
len(train_df),len(test_df)
procs=[FillMissing, Categorify, Normalize]
dep_var = 'Sales'
cat_names = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'StoreType', 'Assortment',
'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear', 'State', 'Week', 'Events', 'Promo_fw',
'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw', 'SchoolHoliday_fw', 'SchoolHoliday_bw']
cont_names = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h', 'Mean_Wind_SpeedKm_h',
'CloudCover', 'trend', 'trend_DE', 'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
dep_var = 'Sales'
df = train_df[cat_names + cont_names + [dep_var,'Date']].copy()
test_df['Date'].min(), test_df['Date'].max()
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut
splits = (list(range(cut, len(train_df))),list(range(cut)))
train_df[dep_var].head()
train_df[dep_var] = np.log(train_df[dep_var])
#train_df = train_df.iloc[:100000]
#cut = 20000
splits = (list(range(cut, len(train_df))),list(range(cut)))
%time to = TabularPandas(train_df, procs, cat_names, cont_names, dep_var, y_block=TransformBlock(), splits=splits)
dls = to.dataloaders(bs=512, path=path)
dls.show_batch()
"""
Explanation: Preparing full data set
End of explanation
"""
max_log_y = np.log(1.2) + np.max(train_df['Sales'])
y_range = (0, max_log_y)
dls.c = 1
learn = tabular_learner(dls, layers=[1000,500], loss_func=MSELossFlat(),
config=tabular_config(ps=[0.001,0.01], embed_p=0.04, y_range=y_range),
metrics=exp_rmspe)
learn.model
len(dls.train_ds.cont_names)
learn.lr_find()
learn.fit_one_cycle(5, 3e-3, wd=0.2)
"""
Explanation: Model
End of explanation
"""
learn.recorder.plot_loss(skip_start=1000)
"""
Explanation: (10th place in the competition was 0.108)
End of explanation
"""
test_to = to.new(test_df)
test_to.process()
test_dls = test_to.dataloaders(bs=512, path=path, shuffle_train=False)
learn.metrics=[]
tst_preds,_ = learn.get_preds(dl=test_dls.train)
np.exp(tst_preds.numpy()).T.shape
test_df["Sales"]=np.exp(tst_preds.numpy()).T[0]
test_df[["Id","Sales"]] = test_df[["Id","Sales"]].astype("int")
test_df[["Id","Sales"]].to_csv("rossmann_submission.csv",index=False)
"""
Explanation: (10th place in the competition was 0.108)
Inference on the test set
End of explanation
"""
|
mattilyra/gensim
|
docs/notebooks/wikinews-bigram-en.ipynb
|
lgpl-2.1
|
LANG="english"
%%bash
fdate=20170327
fname=enwikinews-$fdate-cirrussearch-content.json.gz
if [ ! -e $fname ]
then
wget "https://dumps.wikimedia.org/other/cirrussearch/$fdate/$fname"
fi
# iterator
import gzip
import json
FDATE = 20170327
FNAME = "enwikinews-%s-cirrussearch-content.json.gz" % FDATE
def iter_texts(fpath=FNAME):
with gzip.open(fpath, "rt") as f:
for l in f:
data = json.loads(l)
if "title" in data:
yield data["title"]
yield data["text"]
# also prepare nltk
import nltk
nltk.download("punkt")
nltk.download("stopwords")
"""
Explanation: Illustrating common terms usage using Wikinews in English
getting data
We get the cirrussearch dump of wikinews (a dump meant for Elasticsearch indexing).
End of explanation
"""
# make a custom tokenizer
import re
from nltk.tokenize import sent_tokenize
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w[\w-]*|\d[\d,]*')
# prepare a text
def prepare(txt):
# lower case
txt = txt.lower()
return [tokenizer.tokenize(sent)
for sent in sent_tokenize(txt, language=LANG)]
# we put all the data in RAM; it's not that much
corpus = []
for txt in iter_texts():
corpus.extend(prepare(txt))
# how many sentences and words ?
words_count = sum(len(s) for s in corpus)
print("Corpus has %d words in %d sentences" % (words_count, len(corpus)))
"""
Explanation: Preparing data
we arrange the corpus as required by gensim
End of explanation
"""
from gensim.models.phrases import Phrases
# which are the stop words we will use
from nltk.corpus import stopwords
" ".join(stopwords.words(LANG))
# a version of the corpus without stop words
stop_words = frozenset(stopwords.words(LANG))
def stopwords_filter(txt):
return [w for w in txt if w not in stop_words]
st_corpus = [stopwords_filter(txt) for txt in corpus]
# bigram std
%time bigram = Phrases(st_corpus)
# bigram with common terms
%time bigram_ct = Phrases(corpus, common_terms=stopwords.words(LANG))
"""
Explanation: Testing bigram with and without common terms
The Phrases model gives us the possibility of handling common terms, that is, words that appear many times in a text and are there only to link objects between them.
While you could remove them, you may lose information, for "the president is in america" is not the same as "the president of america".
The common_terms parameter of Phrases can help you deal with them in a smarter way, keeping them around while preventing them from crushing the frequency statistics.
End of explanation
"""
# grams that have more than 2 terms are those with common terms
ct_ngrams = set((g[1], g[0].decode("utf-8"))
for g in bigram_ct.export_phrases(corpus)
if len(g[0].split()) > 2)
ct_ngrams = sorted(list(ct_ngrams))
print(len(ct_ngrams), "grams with common terms found")
# highest scores
ct_ngrams[-20:]
# did we find any bigrams with the same words but different stopwords
import collections
by_terms = collections.defaultdict(set)
for ngram, score in bigram_ct.export_phrases(corpus):
grams = ngram.split()
by_terms[(grams[0], grams[-1])].add(ngram)
for k, v in by_terms.items():
if len(v) > 1:
print(b"-".join(k).decode("utf-8")," : ", [w.decode("utf-8") for w in v])
"""
Explanation: bigram with common terms inside
What are (some of) the bigrams found thanks to common terms
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/nasa-giss/cmip6/models/giss-e2-1h/land.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'giss-e2-1h', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: NASA-GISS
Source ID: GISS-E2-1H
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:20
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Do the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe in general how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to rivers, which quantities are exchanged between the lakes and rivers?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are basins not flowing to the ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
luofan18/deep-learning
|
language-translation/dlnd_language_translation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence-to-sequence model on a dataset of English and French sentences so that it can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
# TODO: Implement Function
source_int = []
for text in source_text.split('\n'):
source_int.append([source_vocab_to_int[word] for word in text.split()])
target_int = []
for text in target_text.split('\n'):
target_int.append([target_vocab_to_int[word] for word in text.split()])
for text in target_int:
text.append(target_vocab_to_int['<EOS>'])
return source_int, target_int
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
"""
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
"""
# TODO: Implement Function
input_ = tf.placeholder(tf.int32, (None, None), name='input')
target_ = tf.placeholder(tf.int32, (None, None), name='target')
lr_ = tf.placeholder(tf.float32, name='learning_rate')
keep_prob_ = tf.placeholder(tf.float32, name='keep_prob')
target_seq_len_ = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_len_ = tf.reduce_max(target_seq_len_, name='max_target_len')
source_seq_len_ = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
return input_, target_, lr_, keep_prob_, target_seq_len_, max_target_len_, source_seq_len_
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoder_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Target sequence length placeholder named "target_sequence_length" with rank 1
Max target sequence length tensor named "max_target_len" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.
Source sequence length placeholder named "source_sequence_length" with rank 1
Return the placeholders in the following tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
End of explanation
"""
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([target_vocab_to_int['<GO>'] * tf.ones((batch_size, 1), tf.int32), ending], axis=1)
return dec_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_encoding_input(process_decoder_input)
"""
Explanation: Process Decoder Input
Implement process_decoder_input by removing the last word id from each batch in target_data and concatenating the GO ID to the beginning of each batch.
End of explanation
"""
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
"""
# TODO: Implement Function
encoding_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=0))
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return cell
cells = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
encoder_output, encoder_state = tf.nn.dynamic_rnn(
cells, encoding_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
return encoder_output, encoder_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create a Encoder RNN layer:
* Embed the encoder input using tf.contrib.layers.embed_sequence
* Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper
* Pass cell and embedded input to tf.nn.dynamic_rnn()
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
"""
# TODO: Implement Function
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=target_sequence_length,
time_major=False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)
final_output, final_state = tf.contrib.seq2seq.dynamic_decode(
training_decoder, impute_finished=True, maximum_iterations=max_summary_length)
return final_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create a training decoding layer:
* Create a tf.contrib.seq2seq.TrainingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
    :param decoding_scope: TensorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
"""
# TODO: Implement Function
start_tokens = start_of_sequence_id * tf.ones((batch_size), tf.int32)
start_tokens = tf.identity(start_tokens, name='start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
dec_embeddings, start_tokens, end_of_sequence_id)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, inference_helper, encoder_state, output_layer)
final_output, final_state = tf.contrib.seq2seq.dynamic_decode(
inference_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
return final_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference decoder:
* Create a tf.contrib.seq2seq.GreedyEmbeddingHelper
* Create a tf.contrib.seq2seq.BasicDecoder
* Obtain the decoder outputs from tf.contrib.seq2seq.dynamic_decode
End of explanation
"""
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
"""
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
# TODO: Implement Function
# Decoder embedding
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# Construct the decoding cell
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=0))
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return cell
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
output_layer = Dense(target_vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
with tf.variable_scope('decoder'):
training_decoder_output = decoding_layer_train(encoder_state,
dec_cell,
dec_embed_input,
target_sequence_length,
max_target_sequence_length,
output_layer,
keep_prob)
with tf.variable_scope('decoder', reuse=True):
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
vocab_size = target_vocab_size
inference_decoder_output = decoding_layer_infer(encoder_state,
dec_cell,
dec_embeddings,
start_of_sequence_id,
end_of_sequence_id,
max_target_sequence_length,
vocab_size,
output_layer,
batch_size,
keep_prob)
return training_decoder_output, inference_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Embed the target sequences
Construct the decoder LSTM cell (just like you constructed the encoder cell above)
Create an output layer to map the outputs of the decoder to the elements of our vocabulary
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
"""
# TODO: Implement Function
_, encoder_state = encoding_layer(input_data,
rnn_size,
num_layers,
keep_prob,
source_sequence_length,
source_vocab_size,
enc_embedding_size)
dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
training_decoder_output, inference_decoder_output = decoding_layer(dec_input,
encoder_state,
target_sequence_length,
max_target_sentence_length,
rnn_size,
num_layers,
target_vocab_to_int,
target_vocab_size,
batch_size,
keep_prob,
dec_embedding_size)
return training_decoder_output, inference_decoder_output
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size).
Process target data using your process_decoder_input(target_data, target_vocab_to_int, batch_size) function.
Decode the encoded input using your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function.
End of explanation
"""
# Number of Epochs
epochs = 2
# Batch Size
batch_size = 32
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 256
decoding_embedding_size = 256
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.8
display_step = 100
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
Set display_step to state how many steps between each debug output statement
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def pad_sentence_batch(sentence_batch, pad_int):
"""Pad sentences with <PAD> so that each sentence of a batch has the same length"""
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
"""Batch targets, sources, and the lengths of their sentences together"""
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
"""
Explanation: Batch and pad the source and target sequences
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
sentence = sentence.lower()
unknow = vocab_to_int['<UNK>']
sentence = [vocab_to_int.get(word, unknow) for word in sentence.split()]
return sentence
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary to the <UNK> word id.
End of explanation
"""
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
cbare/Etudes
|
notebooks/covid-model.ipynb
|
apache-2.0
|
def spread(cases, pop, n, r0=15):
mu = 0
sigma = 1
new_cases = (sum(cases > 0) * r0/10* np.random.lognormal(mu, sigma) / np.exp(mu + sigma**2/2)).round().astype(int)
for exposure in np.random.choice(n, new_cases, replace=True):
# if you're already infected, nothing happens
if cases[exposure] == 0 and pop[exposure] < np.random.uniform(0,1):
cases[exposure] = (np.random.beta(3,2,1)*10).astype(int)
pop[exposure] = min(1.0, pop[exposure] + (1-pop[exposure])*np.random.normal(0.6, 0.2))
return cases
"""
Explanation: Covid Modeling
Warning: I have no idea what I'm doing. Don't take these numbers seriously.
Cases is an array with one element for each member of the population. It's 0 if that person is not infected. A new infection gets a random integer that counts down the number of days 'til recovery. We subtract one each day from all non-zero values until they reach zero.
The array pop represents immunity in the population as the probability of resisting infection when exposed: 0.0 means that person will certainly get sick if exposed, and 1.0 means bomb-proof immunity.
Non-vector version, maybe a little more readable.
End of explanation
"""
def spread(cases, pop, n, r0=15):
mu = 0
sigma = 1
# how many new exposures result from the existing cases?
exposures = (sum(cases > 0) * r0/10 * np.random.lognormal(mu, sigma) / np.exp(mu + sigma**2/2)).round().astype(int)
# randomly sample from the population to decide who gets exposed
exposure_indexes = np.unique(np.random.choice(n, exposures, replace=True))
# If an existing case is exposed, nothing happens. Otherwise, an exposure
# turns into a case if their immunity fails to stop infection
case_indexes = exposure_indexes[ (cases[exposure_indexes] == 0) & (pop[exposure_indexes] < np.random.uniform(0,1,exposure_indexes.shape)) ]
# Draw how many days each case will be sick from a beta distribution
# Note: maybe should depend on immunity?
cases[case_indexes] = (np.random.beta(3,2,case_indexes.shape)*10).astype(int)
# being infected causes immunity to increase. Should being exposed also?
pop[case_indexes] = (pop[case_indexes] + (1-pop[case_indexes])*np.random.normal(0.6, 0.2, case_indexes.shape)).clip(0.0, 1.0)
return cases, pop
def recover(cases, pop, waning_immunity=1.0):
cases[cases>0] -= 1
return cases, pop * waning_immunity
"""
Explanation: Vectorized spread function. Disappointingly, it's not that much faster than the previous version.
End of explanation
"""
n=10000
cases = np.zeros(n)
cases[0] = 10
pop = np.random.uniform(0,1,n)
n_cases = []
for i in tqdm(range(365)):
cases, pop = spread(cases, pop, n=n, r0=15)
cases, pop = recover(cases, pop, waning_immunity=0.999)
n_cases.append(sum(cases>0))
plt.plot(n_cases)
plt.show()
n=10000
runs = []
for j in tqdm(range(100)):
cases = np.zeros(n)
cases[0] = 10
pop = np.random.uniform(0,1,n)
n_cases = []
for i in range(365):
        cases, pop = spread(cases, pop, n=n, r0=8)
cases, pop = recover(cases, pop, waning_immunity=0.999)
n_cases.append(sum(cases>0))
runs.append(n_cases)
z = np.stack(runs, axis=0)
print(z.shape)
z = z.mean(axis=0)
print(z.shape)
plt.plot(z)
plt.show()
import seaborn as sns
import pandas as pd
df = pd.concat(
(
pd.DataFrame({"run":i, "day":range(0,365), "cases":r})
for i, r in enumerate(runs)
),
ignore_index=True
)
sns.lineplot(x="day", y="cases", data=df)
"""
Explanation: Try in small population
End of explanation
"""
n = 5127200
pop = np.random.beta(2,30,n)
plt.hist(pop)
print(pop.mean())
vax_protection_inf = [0.0, 0.25, 0.33, 0.70]
vax_protection_hos = [0.0, 0.50, 0.70, 0.85]
ped_doses = 232_359
doses = [
n,
4_017_924 + ped_doses,
3_956_862,
2_248_153,
]
print("\nd=0")
print(pop.mean(), pop.std(), pop.min(), pop.max())
std=0.05
d=1
vaxed = np.random.choice(n, size=doses[d])
protection = np.random.normal(vax_protection_inf[d], std, size=doses[d])
pop[vaxed] += protection
pop = pop.clip(0,1)
print(f"\nd={d}")
print(len(vaxed))
print(protection.mean(), protection.std())
print(pop.mean(), pop.std(), pop.min(), pop.max())
d=2
vaxed = np.random.choice(vaxed, size=doses[d])
pop[vaxed] += np.random.normal(vax_protection_inf[d]-vax_protection_inf[d-1], std, size=doses[d])
pop = pop.clip(0,1)
print(f"\nd={d}")
print(len(vaxed))
print(protection.mean(), protection.std())
print(pop.mean(), pop.std(), pop.min(), pop.max())
d=3
vaxed = np.random.choice(vaxed, size=doses[d])
pop[vaxed] += np.random.normal(vax_protection_inf[d]-vax_protection_inf[d-1], std, size=doses[d])
pop = pop.clip(0,1)
print(f"\nd={d}")
print(len(vaxed))
print(protection.mean(), protection.std())
print(pop.mean(), pop.std(), pop.min(), pop.max())
doses = [
doses[0]-doses[1],
doses[1]-doses[2],
doses[2]-doses[3],
doses[3]
]
print(doses)
print(sum(doses))
"""
Explanation: Model the New Zealand Population
Roughly based on vaccine numbers from https://covid19.govt.nz/news-and-data/covid-19-data-and-statistics/ as of 2022-02-26.
Let's start out with some variation in their innate immunity, even in a totally naive population. Then add on the effects of each dose of vaccine.
End of explanation
"""
plt.hist(pop)
cases = np.zeros(n)
cases[0] = 10
n_cases = []
for i in tqdm(range(365)):
cases, pop = spread(cases, pop, n=n, r0=15)
cases, pop = recover(cases, pop, waning_immunity=0.9985)
n_cases.append(sum(cases>0))
plt.plot(n_cases)
print(", ".join(str(x) for x in n_cases))
"""
Explanation: Looking at the population immunity, we see a stack of unvaxed down near zero, the 1-dose and 2-dose folks are in a lump between 0.1 and 0.5. Boosted people center around 0.7.
End of explanation
"""
plt.hist(x=(np.random.beta(3,2,1000)*10).astype(int))
import seaborn as sns
z = np.random.lognormal(1.5, 1, 1000) / np.exp(1.5 + 1/2)
print(z.mean(), np.exp(0 + 1/2))
sns.kdeplot(z)
def mode_of_beta(a,b):
if a < 1 and b < 1:
return (0,1)
if a <=1 and b > 1:
return 0
if a > 1 and b <= 1:
return 1
return (a-1) / (a+b-2)
mode_of_beta(10,2)
n=365
x=np.ones(n)
for i in range(1,n):
x[i] = x[i-1]*0.9985
plt.plot(x)
"""
Explanation: Distributions
End of explanation
"""
|
poldrack/fmri-analysis-vm
|
analysis/MVPA/ClassificationAnalysis-Haxby.ipynb
|
mit
|
import nipype.algorithms.modelgen as model # model generation
import nipype.interfaces.fsl as fsl # fsl
from nipype.interfaces.base import Bunch
import os,json,glob
import numpy
import nibabel
import nilearn.plotting
import sklearn.multiclass
from sklearn.svm import SVC
import sklearn.metrics
import sklearn.cross_validation
from nilearn.input_data import NiftiMasker
import scipy.stats
import random
import nilearn.datasets
from haxby_data import HaxbyData
haxby_dataset = nilearn.datasets.fetch_haxby()
boldfile=haxby_dataset.func[0]
datadir=os.path.dirname(boldfile)
print('using data from %s'%datadir)
haxbydata=HaxbyData(datadir)
%matplotlib inline
import matplotlib.pyplot as plt
boldimg=nibabel.load(boldfile)
if not os.path.exists(boldfile.replace('.nii.gz','_brain.nii.gz')):
bet=fsl.BET()
bet.inputs.in_file=boldfile
bet.inputs.out_file=boldfile.replace('.nii.gz','_brain.nii.gz')
bet.inputs.functional=True
bet.inputs.mask=True
bet.run()
brainmaskimg=nibabel.load(boldfile.replace('.nii.gz','_brain_mask.nii.gz'))
vtmaskimg=nibabel.load(haxby_dataset.mask_vt[0])
# set up design info
"""
Explanation: In this exercise we will classify stimulus classes using the Haxby et al. data.
End of explanation
"""
modeldir=os.path.join(datadir,'blockmodel')
# no way to specify the output directory, so we just chdir into the
# desired output directory
if not os.path.exists(modeldir):
os.mkdir(modeldir)
os.chdir(modeldir)
"""
Explanation: Set up model
End of explanation
"""
not os.path.exists(os.path.join(modeldir,'stats/zstat1.nii.gz'))
contrasts=[]
for i in range(len(haxbydata.conditions)):
contrasts.append([haxbydata.conditions[i],'T',[haxbydata.conditions[i]],[1]])
# this is how one could do it using FSL - this is VERY slow, so let's compute the GLM on our own
if not os.path.exists(os.path.join(modeldir,'stats/zstat1.nii.gz')):
info = [Bunch(conditions=haxbydata.conditions,
onsets=haxbydata.onsets,
durations=haxbydata.durations)
]
s = model.SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = [haxbydata.boldbrainfile]
s.inputs.time_repetition = haxbydata.tr
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.subject_info = info
s.run()
level1design = fsl.model.Level1Design()
level1design.inputs.interscan_interval = haxbydata.tr
level1design.inputs.bases = {'dgamma':{'derivs': False}}
level1design.inputs.session_info = s._sessinfo
level1design.inputs.model_serial_correlations=False
level1design.inputs.contrasts=contrasts
level1info=level1design.run()
fsf_file=os.path.join(modeldir,'run0.fsf')
matfile=fsf_file.replace(".fsf",".mat")
event_files=glob.glob(os.path.join(modeldir,'ev*txt'))
modelgen=fsl.model.FEATModel()
modelgen.inputs.fsf_file=fsf_file
modelgen.inputs.ev_files=event_files
modelgen.run()
fgls = fsl.FILMGLS(autocorr_noestimate=True)
fgls.inputs.in_file =haxbydata.boldbrainfile
fgls.inputs.design_file = os.path.join(modeldir,'run0.mat')
fgls.inputs.threshold = 10
fgls.inputs.results_dir = os.path.join(modeldir,'stats')
fgls.inputs.tcon_file=os.path.join(modeldir,'run0.con')
res = fgls.run()
else:
print('stats have already been run - using existing files')
"""
Explanation: Estimate the model with a separate condition for each block using FSL. This will take several hours to finish.
End of explanation
"""
use_whole_brain=False
# include faces and cats
condition_mask = numpy.logical_or(haxbydata.condnums == 2,
haxbydata.condnums == 3)
condlabels=haxbydata.condnums[condition_mask]
runlabels=haxbydata.runs[condition_mask]
if not os.path.exists(os.path.join(modeldir,'zstatdata_facecat.nii.gz')):
zstatdata=numpy.zeros((boldimg.shape[0],boldimg.shape[1],
boldimg.shape[2],len(haxbydata.conditions)))
for i in range(len(haxbydata.conditions)):
zstatdata[:,:,:,i]=nibabel.load(os.path.join(modeldir,
'stats/zstat%d.nii.gz'%int(i+1))).get_data()
zstatimg=nibabel.Nifti1Image(zstatdata,affine=brainmaskimg.get_affine())
zstatimg.to_filename(os.path.join(modeldir,'zstatdata.nii.gz'))
zstatimg=nibabel.Nifti1Image(zstatdata[:,:,:,condition_mask],
affine=brainmaskimg.get_affine())
zstatimg.to_filename(os.path.join(modeldir,'zstatdata_facecat.nii.gz'))
if use_whole_brain:
maskimg=haxbydata.brainmaskfile
else:
maskimg=haxbydata.vtmaskfile
nifti_masker = NiftiMasker(mask_img=maskimg, standardize=True)
fmri_masked = nifti_masker.fit_transform(os.path.join(modeldir,'zstatdata.nii.gz'))
fmri_masked = fmri_masked[condition_mask,:]
"""
Explanation: Load the zstat images that we will use as our block-by-block signal estimates
End of explanation
"""
def shuffle_within_runs(labels,runs):
for r in numpy.unique(runs):
l=labels[runs==r]
random.shuffle(l)
labels[runs==r]=l
return labels
def run_classifier(fmri_masked,condlabels,runs,baseclf,shuffle_labels=False):
cv = sklearn.cross_validation.LeaveOneLabelOut(labels=runs)
pred=numpy.zeros(len(runs)) # predicted class
if len(numpy.unique(condlabels))>2:
clf=sklearn.multiclass.OneVsRestClassifier(baseclf)
else:
clf=baseclf
for train,test in cv:
testdata=fmri_masked[test,:]
traindata=fmri_masked[train,:]
trainlabels=condlabels[train]
if shuffle_labels:
trainlabels=shuffle_within_runs(trainlabels,runs[train])
clf.fit(traindata,trainlabels)
pred[test]=clf.predict(testdata)
confmtx=sklearn.metrics.confusion_matrix(condlabels,pred)
acc=sklearn.metrics.accuracy_score(condlabels,pred)
return pred,confmtx,acc
pred,confmtx,acc=run_classifier(fmri_masked,condlabels,runlabels,SVC(kernel='linear'))
print(confmtx)
print('Accuracy score: %f'%acc)
"""
Explanation: Now let's do a leave-one-run out classifier.
End of explanation
"""
nperms=500
randacc=numpy.zeros(nperms)
condlabels_rand=condlabels.copy()
for i in range(nperms):
_,_,randacc[i]=run_classifier(fmri_masked,condlabels_rand,
runlabels,
SVC(kernel='linear'),
shuffle_labels=True)
pct=scipy.stats.percentileofscore(randacc,acc)
print('Pval:',(100-pct)/100.0)
_=plt.hist(randacc,50)
plt.plot([acc,acc],[0,20],'green',linewidth=2)
plt.axis([0,1,0,100])
"""
Explanation: Run the classifier repeatedly using random labels to get a null distribution
End of explanation
"""
import nilearn.decoding
slradius=8
sl=nilearn.decoding.SearchLight(mask_img=vtmaskimg,radius=slradius)
sl.fit(nibabel.load(os.path.join(modeldir,'zstatdata_facecat.nii.gz')),condlabels)
if not os.path.exists(haxbydata.boldfile.replace('.nii.gz','_brain_mean.nii.gz')):
mean=fsl.maths.MeanImage(in_file=haxbydata.boldfile,
out_file=haxbydata.boldfile.replace('.nii.gz','_brain_mean.nii.gz'))
mean.run()
mean_fmri=nibabel.load(haxbydata.boldfile.replace('.nii.gz','_brain_mean.nii.gz'))
nilearn.plotting.plot_stat_map(nibabel.Nifti1Image(sl.scores_,
mean_fmri.get_affine()), mean_fmri,
title="Searchlight", display_mode="z", cut_coords=[-28,-24,-20,-16,-12],
colorbar=False,threshold=0.75)
"""
Explanation: Now let's set up a searchlight analysis
End of explanation
"""
nruns=500
maxscores=numpy.zeros(nruns)
sl_rand=nilearn.decoding.SearchLight(mask_img=vtmaskimg,radius=slradius)
for i in range(500):
cl=shuffle_within_runs(condlabels,runlabels)
sl_rand.fit(nibabel.load(os.path.join(modeldir,'zstatdata_facecat.nii.gz')),cl)
maxscores[i]=numpy.max(sl_rand.scores_)
cutoff=scipy.stats.scoreatpercentile(maxscores,95)
print('95 pct null accuracy: %f'%cutoff)
print('N voxels > cutoff: %d'%numpy.sum(sl.scores_>cutoff))
"""
Explanation: Exercise: Change the searchlight radius and see how it affects the results.
Obtain a null distribution by running the searchligh 500 times, so we can compare the observed results to those expected by chance
End of explanation
"""
|
Iolaum/ud370
|
assignments/5_word2vec.ipynb
|
gpl-3.0
|
# These are all the modules we'll be using later.
# Make sure you can import them before proceeding further.
%matplotlib inline
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
import shutil # high level file operations
"""
Explanation: Deep Learning
Assignment 5
The goal of this assignment is to train a Word2Vec skip-gram model over Text8 data.
Reading Material
Some reading material to get familiarised with word2vec approach.
A very good introduction on word2vec can be found in this blog post.
First word2vec paper from Mikolov et al.
NIPS paper with improvements for word2vec also from Mikolov et al.
An implementation of word2vec from Thushan Ganegedara.
TensorFlow word2vec tutorial.
Udacity Deep Learning Embeddings notebook.
Short Theory Introduction
Word embeddings
When you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient: you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This is a huge waste of computation.
To solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding to the index of the "on" input unit.
Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an embedding lookup and the number of hidden units is the embedding dimension.
<img src='images/tokenize_lookup.png' width=500>
There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.
Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called Word2Vec uses the embedding layer to find vector representations of words that contain semantic meaning.
Word2Vec
The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other.
Word2vec is a particularly computationally-efficient predictive model for learning word embeddings from raw text. It comes in two flavors, the Continuous Bag-of-Words model (CBOW) and the Skip-Gram model (Section 3.1 and 3.2 in Mikolov et al.). Algorithmically, these models are similar, except that CBOW predicts target words (e.g. 'mat') from source context words ('the cat sits on the'), while the skip-gram does the inverse and predicts source context-words from the target words. This inversion might seem like an arbitrary choice, but statistically it has the effect that CBOW smoothes over a lot of the distributional information (by treating an entire context as one observation). For the most part, this turns out to be a useful thing for smaller datasets. However, skip-gram treats each context-target pair as a new observation, and this tends to do better when we have larger datasets.
The two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram are depicted in the following graph:
<img src="images/word2vec_architectures.png" width="500">
We will first work with the skip-gram model and later with the CBOW. In the skip-gram model, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
End of explanation
"""
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
# Go to parent directory and then go to data directory
fpath = os.getcwd()
cpath = os.path.abspath(os.path.join(fpath, os.pardir))
cpath = os.path.join(cpath, 'data')
cpath = os.path.join(cpath, filename)
# create boolean variable if file exists
cpathl = os.path.exists(cpath)
if not cpathl:
filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
else:
statinfo = os.stat(cpath)
if statinfo.st_size == expected_bytes:
print('Found and verified %s' % filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
if not cpathl:
# After file has been verified move it to data folder
fpath = os.path.join(fpath, filename)
shutil.move(fpath, cpath)
return(filename)
filename = maybe_download('text8.zip', 31344016)
"""
Explanation: Download the data from the source website if necessary.
End of explanation
"""
def read_data(filename):
# Go to parent directory and then go to data directory
fpath = os.getcwd()
cpath = os.path.abspath(os.path.join(fpath, os.pardir))
cpath = os.path.join(cpath, 'data')
cpath = os.path.join(cpath, filename)
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(cpath) as f:
# ZipFile.namelist() :: Returns a list of archive members by name.
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return (data)
words = read_data(filename)
print('Data size %d' % len(words))
"""
Explanation: Read the data into a string.
End of explanation
"""
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
# add most common words and their count as tupples in the list==count
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
# The dictionaty[word] gives the index of the word in the dictionary
# ie the time it was added, hence entries start with most frequent word and go down
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
# data collects the indexes contained in dictionary
# each data element represents the results/dictionary_index of the relevant words element
data.append(index)
count[0][1] = unk_count
# reverses the dictionary entries
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return (data, count, dictionary, reverse_dictionary)
data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del(words) # Hint to reduce memory.
"""
Explanation: Build the dictionary and replace rare words with UNK token.
Note: The UNK token is a special token used to capture out-of-vocabulary (OOV) words.
End of explanation
"""
print('data type is {} with length {}'.format(type(data), len(data)))
print(data[:24])
print('\ncount type is {} with length {}'.format(type(count), len(count)))
print(count[:6])
print('\ndictionary type is {} with length {}'.format(type(dictionary), len(dictionary)))
print(list(dictionary.items())[:6])
print('\nreverse_dictionary type is {} with length {}'.format(
type(reverse_dictionary), len(reverse_dictionary)))
print(list(reverse_dictionary.items())[:6])
"""
Explanation: Let's Explore the data structures created by the previous function
End of explanation
"""
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
"""
Function to generate a batch from the dataset.
Args:
batch_size: size of the batch to be created
num_skips: How many times to reuse an input to generate a label.
skip_window: radius of the window of word2vec/skipgram model
"""
global data_index
# assert % == 0 because we later iterate over that //
assert batch_size % num_skips == 0
# num_skips <= 2*skip_window so our exclusions later don't break down
# because there are not enough words to create (word, label) pairs.
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
# https://docs.python.org/3.5/library/collections.html#collections.deque
# deque :list-like container with fast appends and pops on either end
buffer = collections.deque(maxlen=span)
# First for loop gets numbers/indexes from data object so they can be used in next loop
for __ in range(span):
buffer.append(data[data_index])
# data_index is global variable and the next line iterates it
# up to the end of the dataset and then at start
data_index = (data_index + 1) % len(data)
# loop to create batch data
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
# We divide by num skips for i but now we iterate over them!
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
# appends word that will become label so that it is not picked in the next iteration
targets_to_avoid.append(target)
### create word+label:
# batch always picks the skip_window entry from buffer
            # this is the center word of the window: skip-gram predicts context words from the center word
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
# this adds a word at the end but removes one at the beginning (from buffer)!
buffer.append(data[data_index])
# data index moves with buffer - just like initialisation
# in previous for loop.
data_index = (data_index + 1) % len(data)
return (batch, labels)
print('data:', [reverse_dictionary[di] for di in data[:8]])
for num_skips, skip_window in [(2, 1), (4, 2)]:
data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
for num_skips, skip_window in [(1, 1), (1, 3), (1, 4)]:
data_index = 0
batch, labels = generate_batch(batch_size=8, num_skips=num_skips, skip_window=skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
print(batch)
print(labels)
"""
Explanation: Why do we need to return the data variable?
It is used later to generate data batches.
Data is a list of indexes in the dictionary for all the words in our corpus. It either points to UNK or to a most_common_word entry in the dictionary.
Function to generate a training batch for the skip-gram model.
End of explanation
"""
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
# with graph.as_default(), tf.device('/cpu:0'):
# why use cpu explicitly?
with graph.as_default():
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=softmax_weights,
biases=softmax_biases,
inputs=embed,
labels=train_labels,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Optimizer.
# Note: The optimizer will optimize the softmax_weights AND the embeddings.
# This is because the embeddings are defined as a variable quantity and the
# optimizer's `minimize` method will by default modify all variable quantities
# that contribute to the tensor it is passed.
# See docs on `tf.train.Optimizer.minimize()` for more details.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
def plot(embeddings, labels):
assert embeddings.shape[0] >= len(labels), 'More labels than embeddings'
pylab.figure(figsize=(15,15)) # in inches
for i, label in enumerate(labels):
x, y = embeddings[i,:]
pylab.scatter(x, y)
pylab.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points',
ha='right', va='bottom')
pylab.show()
words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
"""
Explanation: Train a skip-gram model.
End of explanation
"""
data_index = 0
def generate_batch(batch_size, bag_window):
global data_index
span = 2 * bag_window + 1 # [ bag_window target bag_window ]
batch = np.ndarray(shape=(batch_size, span - 1), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size):
buffer_list = list(buffer)
labels[i, 0] = buffer_list.pop(bag_window)
batch[i] = buffer_list
# iterate to the next buffer
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
print('data:', [reverse_dictionary[di] for di in data[:16]])
for bag_window in [1, 2]:
data_index = 0
batch, labels = generate_batch(batch_size=4, bag_window=bag_window)
print('\nwith bag_window = %d:' % (bag_window))
print(' batch:', [[reverse_dictionary[w] for w in bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(4)])
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
###skip_window = 1 # How many words to consider left and right.
###num_skips = 2 # How many times to reuse an input to generate a label.
bag_window = 2 # How many words to consider left and right.
# We pick a random validation set to sample nearest neighbors. here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size, bag_window * 2])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model.
# Look up embeddings for inputs.
embeds = tf.nn.embedding_lookup(embeddings, train_dataset)
# Compute the softmax loss, using a sample of the negative labels each time.
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(
weights=softmax_weights,
biases=softmax_biases,
inputs=tf.reduce_sum(embeds, 1),
labels=train_labels,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
num_steps = 100001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, bag_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
"""
Explanation: Problem
An alternative to skip-gram is another Word2Vec model called CBOW (Continuous Bag of Words). In the CBOW model, instead of predicting a context word from a word vector, you predict a word from the sum of all the word vectors in its context. Implement and evaluate a CBOW model trained on the text8 dataset.
End of explanation
"""
num_points = 400
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
words = [reverse_dictionary[i] for i in range(1, num_points+1)]
plot(two_d_embeddings, words)
"""
Explanation: Re-use code to visualise embeddings
End of explanation
"""
|
ireapps/pycar
|
completed/filter_csv_notebook_complete.ipynb
|
mit
|
from urllib.request import urlretrieve
import csv
"""
Explanation: Filter a CSV
We're going to use built-in Python modules - programs really - to download a csv file from the Internet and save it locally.
CSV stands for comma-separated values. It's a common file format that resembles a spreadsheet or database table in a text file.
So first, let's import two built-in Python modules: urllib and csv.
urllib is a module that allows Python to make http requests to URLs on the web to fetch HTML. It contains a submodule called request. And inside there we want a specific method called urlretrieve
csv is a module that helps Python work with tabular data extracted from spreadsheets and databases
End of explanation
"""
downloaded_file = "banklist.csv"
"""
Explanation: We're going to download a csv file. What should we name it?
End of explanation
"""
urlretrieve("https://s3.amazonaws.com/datanicar/banklist.csv", downloaded_file)
"""
Explanation: Now we need a URL to a CSV file out on the Internet.
For this project we're going to download a CSV file that the FDIC compiles of all the banks that have failed since October 1, 2000.
The file we want is at https://s3.amazonaws.com/datanicar/banklist.csv.
If the internet is uncooperative, we can also use the local version of the file in the project1/data/ directory, and structure our code a little differently.
To do this, we use that program within the urllib module to download the file and save it to our project folder. It's called urlretrieve and for our purposes starting out think of it as a way to download a file from the Internet.
urlretrieve takes two arguments to download a file. First specify our target URL, and then we give it a name for the file we want to create.
End of explanation
"""
filtered_file = open('california_banks.csv', 'w', newline='')
"""
Explanation: The output shows we successfully downloaded the file and saved it
Let's open a new file so we can filter just the data we want. We add the newline parameter when we open the file to write so it doesn't add additional, blank rows on Windows machines.
End of explanation
"""
# create our output
output = csv.writer(filtered_file, delimiter=',')
# open our downloaded file
with open(downloaded_file, 'r') as file:
# use python's csv reader to access the contents
# and create an object that represents the data
csv_data = csv.reader(file)
# write our header row to the output csv
header_row = next(csv_data)
print(header_row)
output.writerow(header_row)
# loop through each row of the csv
for row in csv_data:
# now we're going to use an IF statement
# to find items where the state field
# is equal to California
if row[2] == 'CA':
# write the row to the new csv file
output.writerow(row)
# and print the row to the terminal
print(row)
# print the data type to the terminal
print(type(row))
# print the length of the row to the terminal
print(len(row))
# otherwise continue on
else:
continue
# close the output file
filtered_file.close()
"""
Explanation: We will use the writer method to write data to a file by passing in the name of the new file as the first argument and delimiter as the the second.
Then we will go ahead and use python's csv reader to open the file and see what is inside.
We specify the name of the file we just created, and we add a setting so we can open and read almost any CSV file.
End of explanation
"""
|
bgruening/EDeN
|
examples/annotation.ipynb
|
gpl-3.0
|
pos = 'bursi.pos.gspan'
neg = 'bursi.neg.gspan'
from eden.converter.graph.gspan import gspan_to_eden
iterable_pos = gspan_to_eden( pos )
iterable_neg = gspan_to_eden( neg )
#split train/test
train_test_split=0.9
from eden.util import random_bipartition_iter
iterable_pos_train, iterable_pos_test = random_bipartition_iter(iterable_pos, relative_size=train_test_split)
iterable_neg_train, iterable_neg_test = random_bipartition_iter(iterable_neg, relative_size=train_test_split)
"""
Explanation: Annotation
Consider a binary classification problem. We will fit a predictor and use it to assign a weight score to each node in each instance; this operation is referred to as "annotation". For illustration purposes we will display a few annotated graphs. We will see that building a predictor on the annotated instances can increase the predictive performance.
load data and convert it to graphs
End of explanation
"""
from eden.graph import Vectorizer
vectorizer = Vectorizer( complexity=2 )
%%time
from itertools import tee
iterable_pos_train,iterable_pos_train_=tee(iterable_pos_train)
iterable_neg_train,iterable_neg_train_=tee(iterable_neg_train)
iterable_pos_test,iterable_pos_test_=tee(iterable_pos_test)
iterable_neg_test,iterable_neg_test_=tee(iterable_neg_test)
from eden.util import fit,estimate
estimator = fit(iterable_pos_train_, iterable_neg_train_, vectorizer, n_iter_search=5)
estimate(iterable_pos_test_, iterable_neg_test_, estimator, vectorizer)
"""
Explanation: setup the vectorizer
End of explanation
"""
help(vectorizer.annotate)
%matplotlib inline
from itertools import tee
iterable_pos_train,iterable_pos_train_=tee(iterable_pos_train)
graphs = vectorizer.annotate( iterable_pos_train_, estimator=estimator )
import itertools
graphs = itertools.islice( graphs, 3 )
from eden.util.display import draw_graph
for graph in graphs: draw_graph( graph, vertex_color='importance', size=10 )
%matplotlib inline
from itertools import tee
iterable_pos_train,iterable_pos_train_=tee(iterable_pos_train)
graphs = vectorizer.annotate( iterable_pos_train_, estimator=estimator )
from eden.modifier.graph.vertex_attributes import colorize_binary
graphs = colorize_binary(graph_list = graphs, output_attribute = 'color_value', input_attribute='importance', level=0)
import itertools
graphs = itertools.islice( graphs, 3 )
from eden.util.display import draw_graph
for graph in graphs: draw_graph( graph, vertex_color='color_value', size=10 )
"""
Explanation: annotate instances and list all resulting graphs
display one graph as an example. Color the vertices using the annotated 'importance' attribute.
End of explanation
"""
%%time
a_estimator=estimator
num_iterations = 3
reweight = 0.6
for i in range(num_iterations):
    print('Iteration %d' % i)
from itertools import tee
iterable_pos_train_=vectorizer.annotate( iterable_pos_train, estimator=a_estimator, reweight=reweight )
iterable_neg_train_=vectorizer.annotate( iterable_neg_train, estimator=a_estimator, reweight=reweight )
iterable_pos_test_=vectorizer.annotate( iterable_pos_test, estimator=a_estimator, reweight=reweight )
iterable_neg_test_=vectorizer.annotate( iterable_neg_test, estimator=a_estimator, reweight=reweight )
iterable_pos_train,iterable_pos_train_=tee(iterable_pos_train_)
iterable_neg_train,iterable_neg_train_=tee(iterable_neg_train_)
iterable_pos_test,iterable_pos_test_=tee(iterable_pos_test_)
iterable_neg_test,iterable_neg_test_=tee(iterable_neg_test_)
from eden.util import fit,estimate
a_estimator = fit(iterable_pos_train_, iterable_neg_train_, vectorizer)
estimate(iterable_pos_test_, iterable_neg_test_, a_estimator, vectorizer)
"""
Explanation: Create a data matrix this time using the annotated graphs. Note that now graphs are weighted.
Evaluate the predictive performance on the weighted graphs.
End of explanation
"""
|
banduri/snippets
|
BitCoinInContextDE.ipynb
|
gpl-3.0
|
(mil,mrd,bil) = (pow(10,6),pow(10,9),pow(10,12))
bip_de=3466639*mil # USD
einwohner = int(82457000)
verschuldung=2022.6*mrd
bip_wo=119884004*mil # world GDP
"""
Explanation: How do I actually value Bitcoins?
First, a few numbers on Germany from https://de.wikipedia.org/wiki/Deutschland and from https://de.wikipedia.org/wiki/Staatsverschuldung_Deutschlands
As of 31 December 2015: a total of 2,022.6 billion euros of public debt
End of explanation
"""
print("Staatsverschuldungsquotien: %.2f%%" %(100*verschuldung/bip_de))
"""
Explanation: For Germany this gives the following debt-to-GDP ratio:
End of explanation
"""
marktkap=233*mrd #USD
"""
Explanation: sanity check
https://de.wikipedia.org/wiki/Liste_europ%C3%A4ischer_L%C3%A4nder_nach_Staatsschuldenquote reports 68.2, with the
note that this is an estimate. That is fine for me; I only want to know the order of magnitude of Bitcoin.
Market capitalization of Bitcoin:
On https://blockchainbdgpzk.onion/ (resp. https://blockchain.info) you can see the market capitalization.
As of: Dec 25 2017
End of explanation
"""
print("USD pro Einwohner dank BitCoins: %.2f" %(marktkap/einwohner))
print("Anteil am BIP: %.4f%%" %(marktkap/bip_de))
print("Anteil an der Staatsverschuldung: %.4f%%" %(marktkap/verschuldung))
"""
Explanation: If this market were liquidated, how much would each inhabitant of Germany get? How does that compare to GDP and the national debt?
End of explanation
"""
btcunits=21*mil*100*mil # indivisible units (satoshi)
"""
Explanation: nice
It was not clear to me before what orders of magnitude of money one has to think in here. The maximum number of Bitcoins is 21.0 million and each Bitcoin can be divided into 100 million satoshi (sat). So we have
End of explanation
"""
verschuldungunits = verschuldung * 100 # euros can be divided into cents
btcunits > verschuldungunits
"""
Explanation: good. So I can already represent the entire national debt of Germany with this...
End of explanation
"""
bip_wo_units= bip_wo*100
print("Abbildbare Staatsverschuldungen: %.2f" %(btcunits/verschuldungunits))
print("Anteil Welt BIP: %.2f%%" %(100*btcunits/bip_wo_units))
"""
Explanation: How many countries with a similar level of national debt could be represented this way? And what about the GDP of the world?
End of explanation
"""
|
dbouquin/AstroHackWeek2015
|
day3-machine-learning/07 - Grid Searches for Hyper Parameters.ipynb
|
gpl-2.0
|
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target)
"""
Explanation: Grid Searches
<img src="figures/grid_search_cross_validation.svg" width=100%>
Grid-Search with built-in cross validation
End of explanation
"""
import numpy as np
param_grid = {'C': 10. ** np.arange(-3, 3),
'gamma' : 10. ** np.arange(-5, 0)}
np.set_printoptions(suppress=True)
print(param_grid)
grid_search = GridSearchCV(SVC(), param_grid, verbose=3)
"""
Explanation: Define parameter grid:
End of explanation
"""
grid_search.fit(X_train, y_train)
grid_search.predict(X_test)
grid_search.score(X_test, y_test)
grid_search.best_params_
import matplotlib.pyplot as plt
# We extract just the scores
scores = [x.mean_validation_score for x in grid_search.grid_scores_]
scores = np.array(scores).reshape(6, 5)
plt.matshow(scores)
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(5), param_grid['gamma'])
plt.yticks(np.arange(6), param_grid['C']);
"""
Explanation: A GridSearchCV object behaves just like a normal classifier.
End of explanation
"""
from sklearn.cross_validation import cross_val_score
cross_val_score(GridSearchCV(SVC(), param_grid),
digits.data, digits.target)
"""
Explanation: Nested Cross-validation in scikit-learn:
End of explanation
"""
# %load solutions/grid_search_k_neighbors.py
"""
Explanation: Exercises
Use GridSearchCV to adjust n_neighbors of KNeighborsClassifier.
End of explanation
"""
|
mjbommar/cscs-530-w2016
|
samples/cscs530-w2015-midterm-sample1.ipynb
|
bsd-2-clause
|
#Imports
%matplotlib inline
# Standard imports
import copy
import itertools
# Scientific computing imports
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import pandas
import seaborn; seaborn.set()
import scipy.stats as stats
# Import widget methods
from IPython.html.widgets import *
"""
Explanation: Midterm
Goal
I will explore whether a network-theory driven approach shown to improve the efficiency of an agricultural extension program is sensitive to the models and parameters originally used.
Justification
Social networks have been shown to be important vehicles for the transmission of new agricultural methods or 'technologies' (Bandiera and Rasul 2006, Conley and Udry 2010). These types of dynamics and time-varying agent behavior are best captured through network modeling.
My project is based off a recent paper which used network modeling in conjunction with a large-scale field experiment (Beaman et al 2014). I wish to test the robustness of the findings of their model and so will employ a similar network modeling method.
Background on base paper
Beaman and co-authors aimed to improve the rollout of an agricultural extension program using predictions from network theory to optimally select 'seed farmers'. 'Seed farmers' are the select farmers in a village that the agricultural extension program trains. Because it is costly to train farmers in this way, it is most efficient to pick seed farmers such that their adoption of the agricultural technology will lead to the greatest spread of the technology throughout the village.
Beaman and coauthors first elicit the social networks of various rural villages. Then under the condition that the extension program only trains two farmers in each village, they take every possible combination of two nodes in a village network and simulate an information diffusion process for 4 periods. They take a measure of information diffusion at the end of each simulation and the pair of nodes which gives the greatest diffusion is their optimal seeding pair.
Their findings are then used in a field experiment where a random half of total villages are seeded according to their simulated optimal seeds while the other half is seeded according to the extension program's default procedure, usually based off of a field officer's own knowledge of the village and its influential farmers. They find evidence that network-theory informed seeding leads to increased technological adoption over baseline seeding procedures.
My extensions and measures of interest
I wish to recreate and expand upon their simulations in the following ways:
- I will compare optimal seeds found with their method against optimal seeds found with an extended process of information diffusion. The extended process will include the possibility that households can reject a new technology even after being exposed to it by multiple connections. The original process assumes that a household will automatically adopt a technology after the number of connections who have adopted the technology passes a certain threshold
- I will also sweep across the number of periods simulated and the alpha which the adoption threshold is normally distributed around to see if this produces alternate optimal seeds.
Outline
The original paper looks at rural village in Malawi. I do not have access to their network data but I have a dataset of social graphs from 74 villages in South India. Though there may be differences in network structure between villages in these two locations, I will assume they are reasonably comparable.
First, I will recreate results from Beaman et al by selecting all combinations of node pairs in a subset of 25 villages. For each pair, I will run them through an information diffusion simulation for {3,4,5,6} steps. I will also sweep through values {1,2,3} for an alpha parameter. Each household has an adoption threshold, T, which determines whether they adopt the new technology or not. If X connections have adopted the technology and X >= T, then the household will adopt the new technology in the next period. Each household independently draws a threshold from a normal distribution N(alpha, 0.5) bounded positive, so sweeping through alpha parameters will push the distribution of household thresholds T up and down (a short sketch of this truncated-normal draw appears just before the class definitions below).
To mitigate stochasticity, I will repeat this 2000 times and take an average measure of information diffusion (given by the percent of households adopted at the last step). The pair of nodes which gives the greatest information diffusion forms my theory-driven seed farmers, equivalent to those found in Beaman et al. I will examine whether the determination of these optimal seed farmers depends on the number of steps run and the alpha parameter used. Then, I will run the same simulations except using the extended information diffusion process described above. I want to see whether seed farmers selected through this method are different from those selected by Beaman's process. For the midterm, I will concentrate on coding the re-creation of the method from Beaman et al.
I. Space
I will model space with an undirected social network. Each node represents a rural household and each edge represents a social connection.
II. Actors
Each node in my network is a household. They are modeled simply and have only a few properties:
- id: household id
- adopted: whether they have adopted the new technology or not
- threshold: the threshold above which they will adopt the new technology in the next period. This threshold will be drawn from a normal distribution with mean alpha and standard deviation 0.5 which is bounded to be positive.
In each step, each unadopted household will count the number of connections who have adopted the new technology. If this count exceeds a household's adoption threshold, it will also adopt the technology in the next period.
III. Model Wrapper
I will wrap my model in a function which loops through each village, and in each village, loops through every possible pair of nodes. Then, I will sweep through my parameters, number of steps and alpha. I will repeat this under the alternate information diffusion process. I will also determine and collect optimal seeds here.
IV. Initial Conditions
Each model will start with a list of adopted households. In the first step, only seed households will be in this list which will be read in through the wrapper.
V. Model Parameters
My model will have the following parameters:
- network: adjacency matrix that is read in from wrapper
- alpha: parameter determining distribution of adoption threshold
- HH_adopted: list of adopted households, in first step these are seed households given by wrapper
- HH_not_adopted: list of all not adopted households
End of explanation
"""
class Household(object):
"""
Household class, which encapsulates the entire behavior of a household.
"""
def __init__(self, model, household_id, adopted=False, threshold=1):
"""
Constructor for HH class. By default,
* not adopted
* threshold = 1
Must "link" the Household to their "parent" Model object.
"""
# Set model link and ID
self.model = model
self.household_id = household_id
# Set HH parameters.
self.adopted = adopted
self.threshold = threshold
def __repr__(self):
'''
Return string representation.
'''
skip_none = True
repr_string = type(self).__name__ + " ["
except_list = "model"
elements = [e for e in dir(self) if str(e) not in except_list]
for e in elements:
# Make sure we only display "public" fields; skip anything private (_*), that is a method/function, or that is a module.
if not e.startswith("_") and eval('type(self.{0}).__name__'.format(e)) not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:
value = eval("self." + e)
if value != None and skip_none == True:
repr_string += "{0}={1}, ".format(e, value)
# Clean up trailing space and comma.
return repr_string.strip(" ").strip(",") + "]"
"""
Explanation: Household class
Below is a rough draft of the household class. It only has one component:
constructor: class constructor, which "initializes" or "creates" the household when we call Household(). This is in the init method.
End of explanation
"""
class Model(object):
"""
Model class, which encapsulates the entire behavior of a single "run" in network model.
"""
def __init__(self, network, alpha, HH_adopted, HH_not_adopted):
"""
Class constructor.
"""
# Set our model parameters
self.network = network
self.alpha = alpha
self.HH_adopted = HH_adopted
self.HH_not_adopted = HH_not_adopted
# Set our state variables
self.t = 0
self.households = []
# Setup our history variables.
self.history_adopted = []
self.history_not_adopted = []
self.percent_adopted = 0
# Call our setup methods
self.setup_network()
        self.setup_households()
def setup_network(self):
"""
Method to setup network.
"""
## need to flesh this out. will network be an input given from wrapper?
## what do I need to do to set up network?
        self.g = self.network
def setup_households(self):
"""
Method to setup households.
"""
        self.num_households = self.g.number_of_nodes()
        # Create all households.
        for i in xrange(self.num_households):
            self.households.append(Household(model=self,
                                             household_id=i,
                                             adopted=False,
                                             threshold=stats.truncnorm.rvs((0 - self.alpha) / 0.5,
                                                                           (self.alpha) / 0.5,
                                                                           loc=self.alpha,
                                                                           scale=0.5, size=1)[0]))
def get_neighborhood(self, x):
"""
Get a list of connected nodes.
"""
neighbors = []
        for i in self.g.neighbors(x):
neighbors.append(i)
return neighbors
def step_adopt_decision(self):
"""
Model a household evaluating their connections and making an adopt/not adopt decision
"""
will_adopt = []
        for i in self.HH_not_adopted:
            adopt_count = 0
            for j in self.get_neighborhood(i):
                if self.households[j].adopted:
                    adopt_count += 1
            if adopt_count >= self.households[i].threshold:
                will_adopt.append(i)
        return will_adopt
def step(self):
"""
Model step function.
"""
# Adoption decision
        will_adopt = self.step_adopt_decision()
        # Increment steps and track history.
        self.t += 1
        self.HH_adopted = list(self.HH_adopted) + will_adopt
        self.HH_not_adopted = [hh for hh in self.HH_not_adopted if hh not in will_adopt]
        self.history_adopted.append(copy.copy(self.HH_adopted))
        self.history_not_adopted.append(copy.copy(self.HH_not_adopted))
        self.percent_adopted = len(self.HH_adopted) / float(len(self.households))
def __repr__(self):
'''
Return string representation.
'''
skip_none = True
repr_string = type(self).__name__ + " ["
elements = dir(self)
for e in elements:
# Make sure we only display "public" fields; skip anything private (_*), that is a method/function, or that is a module.
e_type = eval('type(self.{0}).__name__'.format(e))
if not e.startswith("_") and e_type not in ['DataFrame', 'function', 'method', 'builtin_function_or_method', 'module', 'instancemethod']:
value = eval("self." + e)
if value != None and skip_none == True:
if e_type in ['list', 'set', 'tuple']:
repr_string += "\n\n\t{0}={1},\n\n".format(e, value)
elif e_type in ['ndarray']:
repr_string += "\n\n\t{0}=\t\n{1},\n\n".format(e, value)
else:
repr_string += "{0}={1}, ".format(e, value)
# Clean up trailing space and comma.
return repr_string.strip(" ").strip(",") + "]"
"""
Explanation: Model class
Below, we will define our model class. This can be broken up as follows:
- constructor: class constructor, which "initializes" or "creates" the model when we call Model(). This is in the init method.
- setup_network: sets up graph
- setup_households: sets up households
- get_neighborhood: defines a function to get a list of connected nodes
- step_adopt_decision: method to step through household decision
- step: main step method
End of explanation
"""
## cycle through villages:
## (need to create village list where each item points to a different csv file)
num_samples = 2000
for fn in village_list:
village = np.genfromtxt(fn, delimiter=",")
    network = nx.from_numpy_matrix(village)
for HH_adopted in itertools.combinations(nx.nodes(network),2):
HH_not_adopted = [node for node in nx.nodes(network) if node not in HH_adopted]
for alpha in [1,2,3]:
for num_steps in [3,4,5,6]:
for n in xrange(num_samples):
m = Model(network, alpha, HH_adopted, HH_not_adopted)
for t in xrange(num_steps):
m.step()
## I need to collect adoption rate at each final step and average over all samples
## I am not sure where to fit this in
## I also need to write a function which determines optimal seed pairing
#######
#######
"""
Explanation: Wrapper with parameter sweep
Below is the code which wraps around the model. It does the following (a sketch of the still-missing bookkeeping follows this explanation):
- Loops through all villages we wish to examine
- Pulls network data from a csv and puts in the appropriate format
- Loops through all possible pairs of nodes within each village
- Sweeps through alpha and number of steps parameters
- Runs 2000 samples
End of explanation
"""
|
mohanprasath/Course-Work
|
coursera/machine_learning_with_python/Machine Learning Coursera Project.ipynb
|
gpl-3.0
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import numpy as np
import matplotlib.ticker as ticker
from sklearn import preprocessing
%matplotlib inline
"""
Explanation: <a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a>
<h1 align="center"><font size="5">Classification with Python</font></h1>
In this notebook we try to practice all the classification algorithms that we learned in this course.
We load a dataset using the Pandas library, apply the following algorithms, and find the best one for this specific dataset using accuracy evaluation methods.
Lets first load required libraries:
End of explanation
"""
!wget -O loan_train.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_train.csv
"""
Explanation: About dataset
This dataset is about past loans. The Loan_train.csv data set includes details of 346 customers whose loan are already paid off or defaulted. It includes following fields:
| Field | Description |
|----------------|---------------------------------------------------------------------------------------|
| Loan_status | Whether a loan is paid off or in collection |
| Principal | Basic principal loan amount at origination |
| Terms | Origination terms which can be weekly (7 days), biweekly, and monthly payoff schedule |
| Effective_date | When the loan got originated and took effect |
| Due_date | Since it's a one-time payoff schedule, each loan has a single due date |
| Age | Age of applicant |
| Education | Education of applicant |
| Gender | The gender of applicant |
Lets download the dataset
End of explanation
"""
df = pd.read_csv('loan_train.csv')
df.head()
df.shape
"""
Explanation: Load Data From CSV File
End of explanation
"""
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
"""
Explanation: Convert to date time object
End of explanation
"""
df['loan_status'].value_counts()
"""
Explanation: Data visualization and pre-processing
Let’s see how many of each class is in our data set
End of explanation
"""
# notice: installing seaborn might takes a few minutes
!conda install -c anaconda seaborn -y
import seaborn as sns
bins = np.linspace(df.Principal.min(), df.Principal.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'Principal', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
bins = np.linspace(df.age.min(), df.age.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'age', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
"""
Explanation: 260 people have paid off the loan on time while 86 have gone into collection
Lets plot some columns to understand the data better:
End of explanation
"""
df['dayofweek'] = df['effective_date'].dt.dayofweek
bins = np.linspace(df.dayofweek.min(), df.dayofweek.max(), 10)
g = sns.FacetGrid(df, col="Gender", hue="loan_status", palette="Set1", col_wrap=2)
g.map(plt.hist, 'dayofweek', bins=bins, ec="k")
g.axes[-1].legend()
plt.show()
"""
Explanation: Pre-processing: Feature selection/extraction
Let's look at the day of the week on which people get the loan
End of explanation
"""
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
"""
Explanation: We see that people who get the loan at the end of the week dont pay it off, so lets use Feature binarization to set a threshold values less then day 4
End of explanation
"""
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
"""
Explanation: Convert Categorical features to numerical values
Let's look at gender:
End of explanation
"""
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
"""
Explanation: 86 % of female pay there loans while only 73 % of males pay there loan
Lets convert male to 0 and female to 1:
End of explanation
"""
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
"""
Explanation: One Hot Encoding
How about education?
End of explanation
"""
df[['Principal','terms','age','Gender','education']].head()
"""
Explanation: Features before One Hot Encoding
End of explanation
"""
Feature = df[['Principal','terms','age','Gender','weekend']]
Feature = pd.concat([Feature,pd.get_dummies(df['education'])], axis=1)
Feature.drop(['Master or Above'], axis = 1,inplace=True)
Feature.head()
"""
Explanation: Use the one hot encoding technique to convert categorical variables to binary variables and append them to the feature DataFrame.
End of explanation
"""
X = Feature
X[0:5]
"""
Explanation: Feature selection
Let's define our feature set, X:
End of explanation
"""
y = df['loan_status'].values
y[0:5]
"""
Explanation: What are our labels?
End of explanation
"""
X= preprocessing.StandardScaler().fit(X).transform(X)
X[0:5]
temp = (y == 'PAIDOFF')
new_y = temp.astype(int)
"""
Explanation: Normalize Data
Data standardization gives the data zero mean and unit variance (technically this should be done after the train/test split — see the sketch after this cell).
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, new_y, test_size=0.33, random_state=42)
y_train
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
best_knn, best_mse, best_score, neigh, best_knn_clf = 0, 1000, 0, None, None
error_rate = []
for i in range(1, 75):
neigh = KNeighborsClassifier(n_neighbors=i)
neigh.fit(X_train, y_train)
predictions = neigh.predict(X_test)
# Get the actual values for the test set.
# mse = (((predictions - y_test) ** 2).sum()) / len(predictions)
# score = neigh.score(X_test, y_test)
accuracy = accuracy_score(predictions, y_test)
error_rate.append(np.mean(predictions != y_test))
if accuracy > best_score: # best_mse > mse or
# best_mse = mse
best_knn = i
best_score = accuracy
best_knn_clf = neigh
# The following visualization code was obtained and modified from https://medium.com/@kbrook10/day-11-machine-learning-using-knn-k-nearest-neighbors-with-scikit-learn-350c3a1402e6
# Configure and plot error rate over k values
plt.figure(figsize=(20,5))
plt.plot(range(1,75), error_rate, color='blue', linestyle='dashed', marker='o', markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K-Values')
plt.xlabel('K-Values')
plt.ylabel('Error Rate')
plt.savefig("Error Rate vs k Values.png")
plt.show()
print("The KNN Classifier works best when there are ", str(best_knn), " neighbors.")
print("The accuracy in percentage corresponding to the best k values is ", best_score * 100)
from sklearn import metrics
from sklearn.metrics import log_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
# Refit KNN with the best k found above and evaluate that model on the test split
neigh = KNeighborsClassifier(n_neighbors=best_knn)
neigh.fit(X_train, y_train)
yhat = neigh.predict(X_test)  # predictions used by the F1 and Jaccard scores below
yhat_prob = neigh.predict_proba(X_test)  # probabilities used by log loss
print("Log Loss:", log_loss(y_test, yhat_prob))
print("F1 Score:", f1_score(y_test, yhat, average='weighted'))
print("Jaccard Similarity:", jaccard_similarity_score(y_test, yhat))
"""
Explanation: Classification
Now, it is your turn, use the training set to build an accurate model. Then use the test set to report the accuracy of the model
You should use the following algorithms:
- K Nearest Neighbor(KNN)
- Decision Tree
- Support Vector Machine
- Logistic Regression
Notice:
- You can go above and change the pre-processing, feature selection, feature-extraction, and so on, to make a better model.
- You should use either scikit-learn, Scipy or Numpy libraries for developing the classification algorithms.
- You should include the code of the algorithm in the following cells.
K Nearest Neighbor(KNN)
Notice: You should find the best k to build the model with the best accuracy.
warning: You should not use the loan_test.csv for finding the best k; however, you can split your loan_train.csv into train and test sets to find the best k.
End of explanation
"""
from sklearn import tree
dt_clf = tree.DecisionTreeClassifier(max_depth=5)
dt_clf = dt_clf.fit(X_train, y_train)
print(dt_clf.feature_importances_)
dt_clf.score(X=X_test, y=y_test)
from sklearn.metrics import log_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
yhat = dt_clf.predict(X_test)
yhat_prob = dt_clf.predict_proba(X_test)
print("Log Loss:", log_loss(y_test, yhat_prob))
print("F1 Score:", f1_score(y_test, yhat, average='weighted'))
print("Jaccard Similarity:", jaccard_similarity_score(y_test, yhat))
# notice: installing graphviz and pydotplus might take a few minutes
!conda install -c anaconda graphviz pydotplus -y
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from IPython.display import Image
import pydotplus
# The following visualization code was obtained and modified from https://www.datacamp.com/community/tutorials/decision-tree-classification-python
dot_data = StringIO()
export_graphviz(dt_clf, out_file=dot_data,
feature_names=['Principal','terms','age','Gender','weekend', 'Bechalor', 'High School or Below', 'college'],
class_names=['COLLECTION', 'PAIDOFF'],  # class 0 = not paid off, class 1 = paid off
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('DT Classifier Graph.png')
Image(graph.create_png())
"""
Explanation: Decision Tree
End of explanation
"""
from sklearn import svm
svm_clf = svm.SVC(probability=True)
svm_clf.fit(X_train, y_train)
svm_clf.score(X_test, y_test)
from sklearn.metrics import log_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
yhat = svm_clf.predict(X_test)
yhat_prob = svm_clf.predict_proba(X_test)
print("Log Loss:", log_loss(y_test, yhat_prob))
print("F1 Score:", f1_score(y_test, yhat, average='weighted'))
print("Jaccard Similarity:", jaccard_similarity_score(y_test, yhat))
"""
Explanation: Support Vector Machine
End of explanation
"""
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='ovr').fit(X_train, y_train)
lr_clf.score(X_test, y_test)
from sklearn.metrics import log_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn import metrics
yhat = lr_clf.predict(X_test)
yhat_prob = lr_clf.predict_proba(X_test)
yhat_plot_prob = yhat_prob[::,1]
print("Log Loss:", log_loss(y_test, yhat_prob))
print("F1 Score:", f1_score(y_test, yhat, average='weighted'))
print("Jaccard Similarity:", jaccard_similarity_score(y_test, yhat))
# The Following code was obtained and modified from https://www.datacamp.com/community/tutorials/understanding-logistic-regression-python
fpr, tpr, _ = metrics.roc_curve(y_test, yhat_plot_prob)
auc = metrics.roc_auc_score(y_test, yhat_plot_prob)
plt.plot(fpr, tpr, label="data 1, auc="+str(auc))
plt.legend(loc=4)
plt.xlabel("False Positive Rate")
plt.xlabel("True Positive Rate")
plt.title("ROC Curve")
plt.show()
"""
Explanation: Logistic Regression
End of explanation
"""
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
"""
Explanation: Model Evaluation using Test set
End of explanation
"""
!wget -O loan_test.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/loan_test.csv
"""
Explanation: First, download and load the test set:
End of explanation
"""
test_df = pd.read_csv('loan_test.csv')
test_df.head()
old_df = df
df = test_df
df['due_date'] = pd.to_datetime(df['due_date'])
df['effective_date'] = pd.to_datetime(df['effective_date'])
df.head()
df['loan_status'].value_counts()
df['dayofweek'] = df['effective_date'].dt.dayofweek
df['weekend'] = df['dayofweek'].apply(lambda x: 1 if (x>3) else 0)
df.head()
df.groupby(['Gender'])['loan_status'].value_counts(normalize=True)
df['Gender'].replace(to_replace=['male','female'], value=[0,1],inplace=True)
df.head()
df.groupby(['education'])['loan_status'].value_counts(normalize=True)
df[['Principal','terms','age','Gender','education']].head()
df.head()
test_df = df
test_Feature = test_df[['Principal','terms','age','Gender','weekend']]
test_Feature = pd.concat([test_Feature,pd.get_dummies(test_df['education'])], axis=1)
test_Feature.drop(['Master or Above'], axis = 1,inplace=True)
test_Feature.dropna(thresh=2)
test_Feature.head()
test_x = test_Feature
test_y = test_df['loan_status'].values
test_x = preprocessing.StandardScaler().fit(test_x).transform(test_x)
test_temp_y = (test_y == 'PAIDOFF')
test_y = test_temp_y.astype(int)
test_x[0:5]
test_y
for classifier in [neigh, dt_clf, svm_clf, lr_clf]:
print("Classifier:", type(classifier))
yhat = classifier.predict(test_x)
yhat_prob = classifier.predict_proba(test_x)
print("LogLoss:", log_loss(test_y, yhat_prob))
print("F1-Score:", f1_score(test_y, yhat, average='weighted'))
print("Jaccard:", jaccard_similarity_score(test_y, yhat))
"""
Explanation: Load Test set for evaluation
End of explanation
"""
|
mariuszrokita/money-machine
|
Notebooks/EURPLN_exchange_rate_analysis.ipynb
|
mit
|
# customarily import the most important libraries
import pandas as pd # pandas is a dataframe library
import matplotlib.pyplot as plt # matplotlib.pyplot plots data
import numpy as np # numpy provides N-dim object support
import matplotlib.dates as mdates
import math
import urllib.request
import io
# do plotting inline instead of in a separate window
%matplotlib inline
currency_symbol = 'EUR'
start_date = '2015-09-13'
end_date = '2016-09-18'
url_template = ("http://www.bankier.pl/narzedzia/archiwum-kursow-walutowych/get_answer?"
"op=4&cur_symbol={}&start_dt={}&end_dt={}&customs=0"
"&table_name=0&fromDay=0&monthDay=1&avg=0&avg_type=1&idTable=gemiusHitSection16")
use_static = False
if use_static:
# use static file to perform analysis
url = '../data-archive/currencies/EUR_2013.09.09_2016.09.05.csv'
df = pd.read_csv(url, sep=';') # load data
else:
# or get latest data
url = url_template.format(currency_symbol, start_date, end_date)
with urllib.request.urlopen(url) as response:
content = str(response.read(), errors='ignore')
# remove unnecessary header and footers
content_lines = content.splitlines()[2:-4]
content = '\n'.join(content_lines)
df = pd.read_csv(io.StringIO(content), sep=';') # load data
# get brief statistics over data we just read
df_rows, df_columns = df.shape
print("File contains {} rows and {} columns of data.".format(df_rows, df_columns))
"""
Explanation: Currency exchange rate trends analyzer
Trend analysis for EURPLN exchange rates over the 2013.09.09 - 2016.09.05 time period
End of explanation
"""
df.head(3)
"""
Explanation: Show first 3 rows from file
End of explanation
"""
df.tail(3)
"""
Explanation: Show last 3 rows from file
End of explanation
"""
df = df.sort_index(axis=0, ascending=True)
df = df.iloc[::-1]
df.head(3)
df.tail(3)
"""
Explanation: As we can see, the data are not ordered chronologically. We must reverse the order of the rows so that the first row is the oldest and the last row is the newest.
End of explanation
"""
plt.plot(df['Kurs'].values)
"""
Explanation: The data frame rows are now reversed, which we can verify by observing the values on the diagram.
End of explanation
"""
# an example on how to calculate SMA for a collection of data
v = {'score': [1, 2, 2, 3, 3, 4]}
data_frame = pd.DataFrame(v)
pd.rolling_mean(data_frame, 2)
"""
Explanation: SMA - simple moving average
End of explanation
"""
# get the most recent exchange rates (analysis and observation windows are defined below)
last_days_to_analyze = 300
last_days_to_observe = 200
recent_exchange_rates = df['Kurs'].values[-last_days_to_analyze:]
recent_dates = df['Data'].values[-last_days_to_analyze:]
# create new DataFrame containing exchange rates ordered chronologically (oldest first, newest last)
df_exchange_rates = pd.DataFrame(recent_exchange_rates)
# define SMA windows (for short-term trading)
sma_windows = [3, 5, 7, 10, 20, 50]
# calculate SMA-3, SMA-5 and so on..
df_sma = {}
for window in sma_windows:
df_sma[window] = pd.rolling_mean(df_exchange_rates, window)
# get data for last days we are interested in
recent_exchange_rates = recent_exchange_rates[-last_days_to_observe:]
recent_dates = recent_dates[-last_days_to_observe:]
"""
Explanation: Calculate various SMA
In order to perform better analysis, we will use recent data
End of explanation
"""
plt.figure(figsize=(20,7))
plt.plot(recent_exchange_rates)
# sma-10
plt.plot(df_sma[10][-last_days_to_observe:].values)
# sma-20
plt.plot(df_sma[20][-last_days_to_observe:].values)
# sma-50
plt.plot(df_sma[50][-last_days_to_observe:].values)
plt.legend(['exchange rates', 'SMA-10', 'SMA-20', 'SMA-50'], loc='upper left')
"""
Explanation: Plot exchange rates along with different SMA
End of explanation
"""
x = np.arange(0, 1000)
f = np.arange(0, 1000)
g = np.sin(np.arange(0, 10, 0.01)) * 1000
plt.plot(x, f, '-')
plt.plot(x, g, '-')
idx = np.argwhere(np.isclose(f, g, atol=10)).reshape(-1)
plt.plot(x[idx], f[idx], 'ro')
plt.show()
"""
Explanation: Example on how to find points where the curves intersect
End of explanation
"""
plt.figure(figsize=(20,10))
# plot unchanged history of exchange rates
plt.plot(recent_exchange_rates)
legend = []
legend.append('exchange rates')
for window in sma_windows:
plt.plot(df_sma[window][-last_days_to_observe:].values)
legend.append('SMA-{}'.format(window))
# plot dynamically created legend
plt.legend(legend, loc='upper left')
"""
Explanation: Comment: This way of finding intersection points did not prove to be good in our particular case.
Let's analyze which SMA is the best in terms of mirroring the trend
End of explanation
"""
def plot_pair(pair):
window1 = pair[0]
window2 = pair[1]
# get SMA values for interesting windows
sma1 = df_sma[window1][-last_days_to_observe:].values
sma2 = df_sma[window2][-last_days_to_observe:].values
# calculate crossing points between both value ranges
intersection_points = np.argwhere(np.isclose(sma1, sma2, atol=0.01)).reshape(-1)
# plot
plt.plot(sma1)
plt.plot(sma2)
plt.plot(intersection_points, sma1[intersection_points], 'go')
plt.plot(intersection_points, sma2[intersection_points], 'ro')
plt.xticks(np.arange(0, last_days_to_observe, 10))
# return
return list(intersection_points)[::2]
# set up plotting parameters
plt.figure(figsize=(20,10))
plt.grid(True)
plt.plot(recent_exchange_rates)
comparison_pair = (3, 10)
intersection_points = plot_pair(comparison_pair)
plt.legend(['exchange rates', "SMA-{}".format(comparison_pair[0]), "SMA-{}".format(comparison_pair[1])], loc='upper left')
"""
Explanation: Conclusion: SMA-3, SMA-5, SMA-7, and SMA-10 are the best at mirroring the exchange rates
Calculate intersection points between various SMA data ranges
End of explanation
"""
x1 = 40
x2 = 50
test_sma_1 = df_sma[3][-200+x1:-200+x2].values
test_sma_2 = df_sma[10][-200+x1:-200+x2].values
intpoints_v1 = np.argwhere(np.isclose(test_sma_1, test_sma_2, atol=0.01)).reshape(-1)
intpoints_v2 = np.argwhere(np.isclose(test_sma_1, test_sma_2, atol=0.02)).reshape(-1)
intpoints_v3 = np.argwhere(np.isclose(test_sma_1, test_sma_2, atol=0.03)).reshape(-1)
f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
ax1.set_title("atol=0.01")
ax1.plot(test_sma_1)
ax1.plot(test_sma_2)
ax1.plot(intpoints_v1, test_sma_1[intpoints_v1], 'bo')
ax1.plot(intpoints_v1, test_sma_2[intpoints_v1], 'go')
ax1.plot(6, test_sma_1[6], 'ro')
ax1.plot(7, test_sma_1[7], 'ro')
ax1.plot(6, test_sma_2[6], 'ro')
ax1.plot(7, test_sma_2[7], 'ro')
ax2.set_title("atol=0.02")
ax2.plot(test_sma_1)
ax2.plot(test_sma_2)
ax2.plot(intpoints_v2, test_sma_1[intpoints_v2], 'bo')
ax2.plot(intpoints_v2, test_sma_2[intpoints_v2], 'go')
ax3.set_title("atol=0.03")
ax3.plot(test_sma_1)
ax3.plot(test_sma_2)
ax3.plot(intpoints_v3, test_sma_1[intpoints_v3], 'bo')
ax3.plot(intpoints_v3, test_sma_2[intpoints_v3], 'go')
"""
Explanation: What's wrong with the intersections between days 40 and 50?
End of explanation
"""
#
# line segment intersection using vectors
# see Computer Graphics by F.S. Hill
#
# Stolen directly from http://www.cs.mun.ca/~rod/2500/notes/numpy-arrays/numpy-arrays.html
#
from numpy import *
def perp(a):
b = empty_like(a)
b[0] = -a[1]
b[1] = a[0]
return b
# line segment a given by endpoints a1, a2
# line segment b given by endpoints b1, b2
# return
def seg_intersect(a1, a2, b1, b2) :
da = a2-a1
db = b2-b1
dp = a1-b1
dap = perp(da)
denom = dot( dap, db)
num = dot( dap, dp )
return (num / denom.astype(float))*db + b1
def get_intersection_point_between_points(a1, a2, b1, b2):
intersection_point = seg_intersect(a1, a2, b1, b2)
intersection_point_x = intersection_point[0]
if intersection_point_x >= max([a1[0], b1[0]]) and intersection_point_x <= min([a2[0], b2[0]]):
return intersection_point
else:
return [nan, nan]
def get_intersection_points(x_values, y_values_1, y_values_2):
intersection_points = []
for x in x_values[:-1]:
point = get_intersection_point_between_points(array([x, y_values_1[x]]), \
array([x+1, y_values_1[x+1]]), \
array([x, y_values_2[x]]), \
array([x+1, y_values_2[x+1]]))
if not math.isnan(point[0]):
intersection_points.append(point)
return intersection_points
def get_buy_sell_points(x_values, y_values_1, y_values_2):
intersection_points = get_intersection_points(x_values, y_values_1, y_values_2)
buy_points = []
sell_points = []
for i in intersection_points:
original_x = i[0]
next_business_day = int(original_x) + 1
# y_values_1 represents values of 'shorter' moving average
# y_values_2 represents values of 'longer' moving average
if y_values_1[next_business_day] < y_values_2[next_business_day]:
sell_points.append(i)
elif y_values_1[next_business_day] > y_values_2[next_business_day]:
buy_points.append(i)
#else:
# raise Exception('y_values_1[next_business_day] = y_values_2[next_business_day]', 'exception')
return (buy_points, sell_points)
# print("{} ==> date: {}, exchange rate: {}".format(next_business_day, \
# recent_dates[next_business_day], \
# recent_exchange_rates[next_business_day]))
# some test cases
#p1_1 = array([ 0, 4.46613333])
#p1_2 = array([ 1, 4.4712])
#p2_1 = array([ 0, 4.39197])
#p2_2 = array([ 1, 4.40709])
#print(seg_intersect(p1_1, p1_2, p2_1, p2_2))
#print(get_intersection_point_in_range(p1_1, p1_2, p2_1, p2_2))
x1 = 40
x2 = 60
x_values = list(range(0, x2-x1, 1))
test_sma_1 = df_sma[3][-200+x1:-200+x2].values
test_sma_2 = df_sma[10][-200+x1:-200+x2].values
test_sma_1_flattened = [item for sublist in test_sma_1 for item in sublist]
test_sma_2_flattened = [item for sublist in test_sma_2 for item in sublist]
intpoints_v1 = get_intersection_points(x_values, test_sma_1_flattened, test_sma_2_flattened)
f, ax1 = plt.subplots(1, sharex=True, sharey=True)
ax1.plot(test_sma_1)
ax1.plot(test_sma_2)
for intpoint in intpoints_v1:
ax1.plot(intpoint[0], intpoint[1], 'ro')
"""
Explanation: Conclusion: This way of finding intersecting points is not acceptable for our case.
End of explanation
"""
def plot_pair_v2(pair):
window1 = pair[0]
window2 = pair[1]
# get SMA values for interesting windows
sma1 = df_sma[window1][-last_days_to_observe:-1].values
sma2 = df_sma[window2][-last_days_to_observe:-1].values
# make extra calculations that simplify process of finding crossing points
sma_1_flattened = [item for sublist in sma1 for item in sublist]
sma_2_flattened = [item for sublist in sma2 for item in sublist]
x_values = range(0, len(sma_1_flattened), 1)
# calculate crossing points between both value ranges
#intersection_points = get_intersection_points(x_values, sma_1_flattened, sma_2_flattened)
# calculate buy, sell points for both value ranges
(buy_points, sell_points) = get_buy_sell_points(x_values, sma_1_flattened, sma_2_flattened)
# plot
plt.xticks(np.arange(0, last_days_to_observe, 10))
plt.plot(sma1)
plt.plot(sma2)
for buy in buy_points:
plt.plot(buy[0], buy[1], 'go')
for sell in sell_points:
plt.plot(sell[0], sell[1], 'ro')
# return all found buy, sell points
return (buy_points, sell_points)
# set up plotting parameters
plt.figure(figsize=(20,7))
plt.grid(True)
plt.plot(recent_exchange_rates)
comparison_pair = (3, 10)
(buy_points, sell_points) = plot_pair_v2(comparison_pair)
plt.legend(['exchange rates', "SMA-{}".format(comparison_pair[0]), "SMA-{}".format(comparison_pair[1])], loc='upper left')
"""
Explanation: Now that we have a better algorithm for finding intersection points, it's time to redraw
End of explanation
"""
#print("Exact intersection points between SMA-{} and SMA-{}".format(comparison_pair[0], comparison_pair[1]))
#print(intersection_points)
#print()
#print("Intersection points between SMA-{} and SMA-{} changed to indicate next business day".format(comparison_pair[0], \
# comparison_pair[1]))
#for i in intersection_points:
# original_x = i[0]
# next_business_day = int(original_x) + 1
# print("{} ==> date: {}, exchange rate: {}".format(next_business_day, \
# recent_dates[next_business_day], \
# recent_exchange_rates[next_business_day]))
#print()
"""
Explanation: Determine exchange rates
INFORMATION: Exchange rates are determined for the next business day, i.e., the first available day once we know that the crossing took place
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion
|
notebooks/image_models/solutions/4_tpu_training.ipynb
|
apache-2.0
|
import os
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
BUCKET = PROJECT
os.environ["BUCKET"] = BUCKET
"""
Explanation: Transfer Learning on TPUs
In the <a href="3_tf_hub_transfer_learning.ipynb">previous notebook</a>, we learned how to do transfer learning with TensorFlow Hub. In this notebook, we're going to kick up our training speed with TPUs.
Learning Objectives
Know how to set up a TPU strategy for training
Know how to use a TensorFlow Hub Module when training on a TPU
Know how to create and specify a TPU for training
First things first. Configure the parameters below to match your own Google Cloud project details.
End of explanation
"""
%%writefile tpu_models/trainer/task.py
"""TPU trainer command line interface"""
import argparse
import sys
import tensorflow as tf
from . import model, util
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--epochs", help="The number of epochs to train", type=int, default=5
)
parser.add_argument(
"--steps_per_epoch",
help="The number of steps per epoch to train",
type=int,
default=500,
)
parser.add_argument(
"--train_path",
help="The path to the training data",
type=str,
default="gs://cloud-ml-data/img/flower_photos/train_set.csv",
)
parser.add_argument(
"--eval_path",
help="The path to the evaluation data",
type=str,
default="gs://cloud-ml-data/img/flower_photos/eval_set.csv",
)
parser.add_argument(
"--tpu_address",
help="The path to the TPUs we will use in training",
type=str,
required=True,
)
parser.add_argument(
"--hub_path",
help="The path to TF Hub module to use in GCS",
type=str,
required=True,
)
parser.add_argument(
"--job-dir",
help="Directory where to save the given model",
type=str,
required=True,
)
return parser.parse_known_args(argv)
def main():
"""Parses command line arguments and kicks off model training."""
args = _parse_arguments(sys.argv[1:])[0]
# TODO: define a TPU strategy
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=args.tpu_address
)
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
with strategy.scope():
train_data = util.load_dataset(args.train_path)
eval_data = util.load_dataset(args.eval_path, training=False)
image_model = model.build_model(args.job_dir, args.hub_path)
model_history = model.train_and_evaluate(
image_model,
args.epochs,
args.steps_per_epoch,
train_data,
eval_data,
args.job_dir,
)
return model_history
if __name__ == "__main__":
main()
"""
Explanation: Packaging the Model
In order to train on a TPU, we'll need to set up a python module for training. The skeleton for this has already been built out in tpu_models with the data processing functions from the previous lab copied into <a href="tpu_models/trainer/util.py">util.py</a>.
Similarly, the model building and training functions are pulled into <a href="tpu_models/trainer/model.py">model.py</a>. This is almost entirely the same as before, except the hub module path is now a variable to be provided by the user. We'll get into why in a bit, but first, let's take a look at the new task.py file.
We've added five command line arguments which are standard for cloud training of a TensorFlow model: epochs, steps_per_epoch, train_path, eval_path, and job-dir. There are two new arguments for TPU training: tpu_address and hub_path
tpu_address is going to be our TPU name as it appears in Compute Engine Instances. We can specify this name with the ctpu up command.
hub_path is going to be a Google Cloud Storage path to a downloaded TensorFlow Hub module.
The other big difference is some code to deploy our model on a TPU. To begin, we'll set up a TPU Cluster Resolver, which will help tensorflow communicate with the hardware to set up workers for training (more on TensorFlow Cluster Resolvers). Once the resolver connects to and initializes the TPU system, our Tensorflow Graphs can be initialized within a TPU distribution strategy, allowing our TensorFlow code to take full advantage of the TPU hardware capabilities.
TODO #1: Set up a TPU strategy
End of explanation
"""
!wget https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4?tf-hub-format=compressed
"""
Explanation: The TPU server
Before we can start training with this code, we need a way to pull in MobileNet. When working with TPUs in the cloud, the TPU will not have access to the VM's local file directory since the TPU worker acts as a server. Because of this, all data used by our model must be hosted on an outside storage system such as Google Cloud Storage. This makes caching our dataset especially critical in order to speed up training time.
To access MobileNet with these restrictions, we can download a compressed saved version of the model by using the wget command. Adding ?tf-hub-format=compressed at the end of our module handle gives us a download URL.
End of explanation
"""
%%bash
rm -r tpu_models/hub
mkdir tpu_models/hub
tar xvzf 4?tf-hub-format=compressed -C tpu_models/hub/
"""
Explanation: This model is still compressed, so let's uncompress it with the tar command below and place it in our tpu_models directory.
End of explanation
"""
!gsutil rm -r gs://$BUCKET/tpu_models
!gsutil cp -r tpu_models gs://$BUCKET/tpu_models
"""
Explanation: Finally, we need to transfer our materials to the TPU. We'll use GCS as a go-between, using gsutil cp to copy everything.
End of explanation
"""
!echo "gsutil cp -r gs://$BUCKET/tpu_models ."
"""
Explanation: Spinning up a TPU
Time to wake up a TPU! Open the Google Cloud Shell and copy the gcloud compute command below. Say 'Yes' to the prompts to spin up the TPU.
gcloud compute tpus execution-groups create \
--name=my-tpu \
--zone=us-central1-b \
--tf-version=2.3.2 \
--machine-type=n1-standard-1 \
--accelerator-type=v3-8
It will take about five minutes to wake up. Then, it should automatically SSH into the TPU, but alternatively the Compute Engine interface can be used to SSH in. You'll know you're running on a TPU when the command line starts with your-username@your-tpu-name.
This is a fresh TPU and still needs our code. Run the below cell and copy the output into your TPU terminal to copy your model from your GCS bucket. Don't forget to include the . at the end as it tells gsutil to copy data into the current directory.
End of explanation
"""
%%bash
export TPU_NAME=my-tpu
echo "export TPU_NAME="$TPU_NAME
echo "python3 -m tpu_models.trainer.task \
--tpu_address=\$TPU_NAME \
--hub_path=gs://$BUCKET/tpu_models/hub/ \
--job-dir=gs://$BUCKET/flowers_tpu_$(date -u +%y%m%d_%H%M%S)"
"""
Explanation: Time to shine, TPU! Run the below cell and copy the output into your TPU terminal. Training will be slow at first, but it will pick up speed after a few minutes once the Tensorflow graph has been built out.
TODO #2 and #3: Specify the tpu_address and hub_path
End of explanation
"""
|
vbarua/PythonWorkshop
|
Code/Numerical Computing with Numpy/1 - Introduction to Numpy.ipynb
|
mit
|
x = [1,2,3]
y = [4,5,6]
x + y
"""
Explanation: Introduction to NumPy
Numpy is a library that provides multi-dimensional array objects. You can think of these somewhat like normal Python lists, except they have a number of qualities that make them better for numeric computations.
Let's try adding two lists together
End of explanation
"""
z = [0]*len(x) # Generates a list of zeroes the same length as x.
for i in range(len(x)):
z[i] = x[i] + y[i]
z
"""
Explanation: With Python lists, the + operator concatenates them. If we wanted to add these two lists elementwise we'd have to use a loop
End of explanation
"""
[i + j for (i, j) in zip(x, y)]
"""
Explanation: or a for comprehension
End of explanation
"""
import numpy as np  # numpy was not imported earlier in this notebook
xNumpy = np.array([1, 2, 3])
yNumpy = np.array([4, 5, 6])
xNumpy + yNumpy
"""
Explanation: With Numpy arrays this isn't the case
End of explanation
"""
def normal_multiply(x, y):
return [i * j for i, j in zip(x, y)]
def numpy_multiply(x, y):
return x * y
x = [i/3. for i in range(1,1000001)]
y = [i/7. for i in range(1,1000001)]
xNumpy = np.array(x)
yNumpy = np.array(y)
"""
Explanation: The + operator applied to Numpy arrays performs elementwise addition. -, * and / also apply elementwise. Using these operators makes it a lot easier to understand what's happening in the code.
The other advantage of Numpy arrays has to do with performance. Let's perform elementwise multiplication of the first 1 million numbers divided by 3 and the first 1 million numbers divided by 7, that is:
[1/3, 2/3, ..., 999999/3, 1000000/3] *
[1/7, 2/7, ..., 999999/7, 1000000/7]
End of explanation
"""
%timeit normal_multiply(x, y)
%timeit numpy_multiply(xNumpy, yNumpy)
"""
Explanation: Both of the functions perform the same operation, one using a Python for loop and the other taking advantage of Numpy arrays.
End of explanation
"""
|
FabricioMatos/ifes-dropout-machine-learning
|
extra/datavix-meetup/Predicao de Evasao.ipynb
|
bsd-3-clause
|
%matplotlib inline
#import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm as cm
from pandas.tools.plotting import scatter_matrix
from pandas import DataFrame
from sklearn import cross_validation
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.externals import joblib
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
"""
Explanation: School Dropout Prediction - VixData Meetup
Date: 09/03/2017
Author: Fabricio Vargas Matos
Email: fabriciovargasmatos@gmail.com
Objective
Build a classifier that, using the data available after the end of the first academic term (semester or year), can predict whether or not a student will drop out.
Requirements
python 2.7+
jupyter notebook - interactive web environment for coding and documenting the results.
pandas - dataset manipulation
numpy - arrays and linear algebra
matplotlib - plotting
scikit-learn - a wide range of machine-learning algorithms and supporting tools
End of explanation
"""
df = pd.read_csv('../../../input/alunos.csv', header=0, sep=';')
df.dtypes
df.head(10)
# number of nulls in each column
df.isnull().sum()
df.fillna(value=0, inplace=True)
df.isnull().sum()
df.groupby('evadiu').size()
df.groupby('evadiu').size().plot(kind='bar')
plt.show()
def pizza_evazao(qtd_evadiu, qtd_nao_evadiu):
labels = 'Evadiu', 'Nao Evadiu'
sizes = [qtd_evadiu, qtd_nao_evadiu]
colors = ['gold','lightskyblue']
explode = (0.1, 0) # explode 1st slice
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.show()
qtd_evadiu = df.groupby('evadiu').size()[1]
qtd_nao_evadiu = df.groupby('evadiu').size()[0]
pizza_evazao(qtd_evadiu, qtd_nao_evadiu)
df.hist(figsize=(30, 20), bins=20)
plt.show()
df.describe()
df['distancia_conclusao_2grau'].hist(bins=50)
plt.show()
df_temp = df[df['distancia_conclusao_2grau'] > 10]
df_temp['distancia_conclusao_2grau'].hist(bins=45)
plt.show()
df.groupby('evadiu').describe()
"""
Explanation: 1. Exploratory Analysis
End of explanation
"""
df.drop('hash_cod_matricula', axis=1, inplace=True)
df.head(10)
ncolumns = df.shape[1]
array = df.values
X = array[:,0:ncolumns-1].astype(float)
Y = array[:,ncolumns-1]
X_train, X_validation, Y_train, Y_validation = cross_validation.train_test_split(X, Y, test_size=0.20, random_state=7)
print 'X_train/Y_train:'
print X_train.shape
print Y_train.shape
print
print 'X_validation/Y_validation:'
print X_validation.shape
print Y_validation.shape
"""
Explanation: 2. Preparing the data for training
End of explanation
"""
def trainDummyClassifier(X_train, Y_train):
print '\nTraining ...'
# “stratified”: generates predictions by respecting the training set’s class distribution.
# “uniform”: generates predictions uniformly at random.
model = DummyClassifier(strategy='stratified', random_state=7)
#train
model.fit(X_train, Y_train)
return model
trainedDummyModel = trainDummyClassifier(X_train, Y_train)
print 'Done!'
predictions = trainedDummyModel.predict(X_validation)
print 'X_validatinon:', len(X_validation)
print 'predictions:', len(predictions)
print predictions
qtd_evadiu = len(np.where(predictions == 1)[0])
qtd_nao_evadiu = len(np.where(predictions == 0)[0])
pizza_evazao(qtd_evadiu, qtd_nao_evadiu)
def test_accuracy(predictions, X_validation, Y_validation):
print '\n=== Model Accuracy ==='
print '\naccuracy_score:'
print(accuracy_score(Y_validation, predictions))
print '\nconfusion_matrix:'
print '=> By definition a confusion matrix C is such that C_{i, j} is equal to the number of observations known to be in group i but predicted to be in group j.'
print(confusion_matrix(Y_validation, predictions))
print '\nclassification_report:'
print '=> http://machinelearningmastery.com/classification-accuracy-is-not-enough-more-performance-measures-you-can-use/'
print(classification_report(Y_validation, predictions))
test_accuracy(predictions, X_validation, Y_validation)
"""
Explanation: 3. Dummy Classifier
End of explanation
"""
def trainRandomForestClassifier(X_train, Y_train):
print '\nTraining ...'
model = RandomForestClassifier(max_features='log2', n_estimators=500, random_state=7, class_weight='balanced')
#train
model.fit(X_train, Y_train)
return model
trainedRFModel = trainRandomForestClassifier(X_train, Y_train)
print 'Done!'
qtd_evadiu = len(np.where(predictions == 1)[0])
qtd_nao_evadiu = len(np.where(predictions == 0)[0])
pizza_evazao(qtd_evadiu, qtd_nao_evadiu)
predictions = trainedRFModel.predict(X_validation)
test_accuracy(predictions, X_validation, Y_validation)
"""
Explanation: 4. Random Forest
Mas o que é RandomForest? Pausa para um breve overview de Random Forest
References:
Decision trees - https://www.youtube.com/watch?v=-dCtJjlEEgM
Random Forests - https://www.youtube.com/watch?v=3kYujfDgmNk
Random Forests Applications - https://www.youtube.com/watch?v=zFGPjRPwyFw
http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
End of explanation
"""
# http://scikit-learn.org/stable/auto_examples/plot_multilabel.html#sphx-glr-auto-examples-plot-multilabel-py
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
x = X_validation
y0 = Y_validation
y1 = 1 - Y_validation
y = np.column_stack((y0.reshape(len(y0), 1), y1.reshape(len(y1), 1)))
plt.figure(figsize=(40, 30))
data_samples = 200
plot_subfigure(x[:data_samples], y[:data_samples], 4, "PCA - 2D", "pca")
plt.show()
"""
Explanation: 5. Visualizing the data in 2D
End of explanation
"""
n_estimators_values = [50, 200]
max_features_values = [1, 0.1, 'log2']
param_grid = dict(n_estimators=n_estimators_values, max_features=max_features_values)
results = []
model = RandomForestClassifier(verbose=0)
# cross-validation is a very important mechanism for keeping the model generic enough to
# produce good predictions on unseen datasets.
kfold = cross_validation.KFold(n=len(X_train), n_folds=10, random_state=7)
grid = GridSearchCV(n_jobs=5, estimator=model, param_grid=param_grid, scoring='accuracy', cv=kfold)
grid_result = grid.fit(X_train, Y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
cv_results = grid_result.cv_results_['mean_test_score']
results.append(cv_results)
grid_scores = sorted(grid_result.grid_scores_, key=lambda x: x[2].mean(), reverse=True)
for param, mean_score, scores in grid_scores:
print("%f (%f) with: %r" % (scores.mean(), scores.std(), param))
"""
Explanation: 6. Tuning the RF parameters
End of explanation
"""
|
joommf/tutorial
|
workshops/2017-04-05-IOPMagnetism2017/tutorial4_current_induced_dw_motion.ipynb
|
bsd-3-clause
|
# Definition of parameters
L = 500e-9 # sample length (m)
w = 20e-9 # sample width (m)
d = 2.5e-9 # discretisation cell size (m)
Ms = 5.8e5 # saturation magnetisation (A/m)
A = 15e-12 # exchange energy constant (J/m)
D = 3e-3 # Dzyaloshinskii-Moriya energy constant (J/m**2)
K = 0.5e6 # uniaxial anisotropy constant (J/m**3)
u = (0, 0, 1) # easy axis
gamma = 2.211e5 # gyromagnetic ratio (m/As)
alpha = 0.3 # Gilbert damping
# Mesh definition
p1 = (0, 0, 0)
p2 = (L, w, d)
cell = (d, d, d)
mesh = oc.Mesh(p1=p1, p2=p2, cell=cell)
# Micromagnetic system definition
system = oc.System(name="domain_wall_pair")
system.hamiltonian = oc.Exchange(A=A) + \
oc.DMI(D=D, kind="interfacial") + \
oc.UniaxialAnisotropy(K=K, u=u)
system.dynamics = oc.Precession(gamma=gamma) + oc.Damping(alpha=alpha)
"""
Explanation: Tutorial 4 - Current induced domain wall motion
In this tutorial we show how spin transfer torque (STT) can be included in micromagnetic simulations. To illustrate that, we will try to move a domain wall pair using spin-polarised current.
Let us simulate a two-dimensional sample with length $L = 500 \,\text{nm}$, width $w = 20 \,\text{nm}$ and discretisation cell $(2.5 \,\text{nm}, 2.5 \,\text{nm}, 2.5 \,\text{nm})$. The material parameters are:
exchange energy constant $A = 15 \,\text{pJ}\,\text{m}^{-1}$,
Dzyaloshinskii-Moriya energy constant $D = 3 \,\text{mJ}\,\text{m}^{-2}$,
uniaxial anisotropy constant $K = 0.5 \,\text{MJ}\,\text{m}^{-3}$ with easy axis $\mathbf{u}$ in the out of plane direction $(0, 0, 1)$,
gyromagnetic ratio $\gamma = 2.211 \times 10^{5} \,\text{m}\,\text{A}^{-1}\,\text{s}^{-1}$, and
Gilbert damping $\alpha=0.3$.
End of explanation
"""
def m_value(pos):
x, y, z = pos
if 20e-9 < x < 40e-9:
return (0, 1e-8, -1)
else:
return (0, 1e-8, 1)
# We have added the y-component of 1e-8 to the magnetisation to be able to
# plot the vector field. This will not be necessary in the long run.
system.m = df.Field(mesh, value=m_value, norm=Ms)
system.m.plot_slice("z", 0);
"""
Explanation: Because we want to move a DW pair, we need to initialise the magnetisation in an appropriate way before we relax the system.
End of explanation
"""
md = oc.MinDriver()
md.drive(system)
system.m.plot_slice("z", 0);
"""
Explanation: Now, we can relax the magnetisation.
End of explanation
"""
ux = 400 # velocity in x direction (m/s)
beta = 0.5 # non-adiabatic STT parameter
system.dynamics += oc.STT(u=(ux, 0, 0), beta=beta) # please notice the use of `+=` operator
"""
Explanation: Now we can add the STT term to the dynamics equation.
End of explanation
"""
td = oc.TimeDriver()
td.drive(system, t=0.5e-9, n=100)
system.m.plot_slice("z", 0);
"""
Explanation: And drive the system for half a nano second:
End of explanation
"""
# Definition of parameters
L = 500e-9 # sample length (m)
w = 20e-9 # sample width (m)
d = 2.5e-9 # discretisation cell size (m)
Ms = 5.8e5 # saturation magnetisation (A/m)
A = 15e-12 # exchange energy constant (J/m)
D = 3e-3 # Dzyaloshinskii-Moriya energy constant (J/m**2)
K = 0.5e6 # uniaxial anisotropy constant (J/m**3)
u = (0, 0, 1) # easy axis
gamma = 2.211e5 # gyromagnetic ratio (m/As)
alpha = 0.3 # Gilbert damping
# Mesh definition
p1 = (0, 0, 0)
p2 = (L, w, d)
cell = (d, d, d)
mesh = oc.Mesh(p1=p1, p2=p2, cell=cell)
# Micromagnetic system definition
system = oc.System(name="domain_wall")
system.hamiltonian = oc.Exchange(A=A) + \
oc.DMI(D=D, kind="interfacial") + \
oc.UniaxialAnisotropy(K=K, u=u)
system.dynamics = oc.Precession(gamma=gamma) + oc.Damping(alpha=alpha)
def m_value(pos):
x, y, z = pos
if 20e-9 < x < 40e-9:
return (0, 1e-8, -1)
else:
return (0, 1e-8, 1)
# We have added the y-component of 1e-8 to the magnetisation to be able to
# plot the vector field. This will not be necessary in the long run.
system.m = df.Field(mesh, value=m_value, norm=Ms)
system.m.plot_slice("z", 0);
md = oc.MinDriver()
md.drive(system)
system.m.plot_slice("z", 0);
ux = 400 # velocity in x direction (m/s)
beta = 0.5 # non-adiabatic STT parameter
system.dynamics += oc.STT(u=(ux, 0, 0), beta=beta)
td = oc.TimeDriver()
td.drive(system, t=0.5e-9, n=100)
system.m.plot_slice("z", 0);
"""
Explanation: We see that the DW pair has moved in the positive $x$ direction.
Exercise
Modify the code below (which is a copy of the example from above) to obtain one domain wall instead of a domain wall pair and move it using the same current.
End of explanation
"""
|
myselfHimanshu/UdacityDSWork
|
Deep Learning Nanodegree/Project_1/dlnd-your-first-neural-network.ipynb
|
gpl-2.0
|
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: Your first neural network
In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
End of explanation
"""
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
"""
Explanation: Load and prepare the data
A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
End of explanation
"""
rides[:24*10].plot(x='dteday', y='cnt')
"""
Explanation: Checking out the data
This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of which likely affect the number of riders. You'll be trying to capture all this with your model.
End of explanation
"""
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
"""
Explanation: Dummy variables
Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().
End of explanation
"""
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
"""
Explanation: Scaling target variables
To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
The scaling factors are saved so we can go backwards when we use the network for predictions.
End of explanation
"""
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
"""
Explanation: Splitting the data into training, testing, and validation sets
We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
End of explanation
"""
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
"""
Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
End of explanation
"""
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
#output_errors = # Output layer error is the difference between desired target and actual output.
output_errors = targets_list-final_outputs
# TODO: Backpropagated error
hidden_errors = np.dot( self.weights_hidden_to_output.T, output_errors.T)
hidden_grad = hidden_errors * (hidden_outputs * (1 - hidden_outputs))
# TODO: Update the weights
self.weights_hidden_to_output += self.lr*(np.dot(output_errors.T,hidden_outputs.T))
self.weights_input_to_hidden += self.lr*(np.dot(hidden_grad,inputs.T))
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot( self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
"""
Explanation: Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
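For reference, a compact way to write the updates that the implementation above performs (this notation is introduced here and is not part of the original project statement): with output error $\delta_{o} = y - \hat{y}$ and hidden error $\delta_{h} = (W_{ho}^{T}\delta_{o}) \odot h \odot (1 - h)$, the weights are updated as $W_{ho} \leftarrow W_{ho} + \eta\,\delta_{o}h^{T}$ and $W_{ih} \leftarrow W_{ih} + \eta\,\delta_{h}x^{T}$, where $x$ is the input vector, $h$ the hidden-layer output, and $\eta$ the learning rate.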
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
End of explanation
"""
import sys
### Set the hyperparameters here ###
epochs = 2000
learning_rate = 0.009
hidden_nodes = 13
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
"""
Explanation: Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of epochs
This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
"""
Explanation: Check out your predictions
Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
End of explanation
"""
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
"""
Explanation: Thinking about your results
Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
Your answer below
The model predicts well until Dec 21; after that (Dec 22-Dec 31) it fails to generalize, likely because the holiday period looks quite different from most of the training data.
Unit tests
Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
End of explanation
"""
|
paris-saclay-cds/python-workshop
|
Day_1_Scientific_Python/scikit-learn/13-Cross-Validation.ipynb
|
bsd-3-clause
|
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
iris = load_iris()
X, y = iris.data, iris.target
classifier = KNeighborsClassifier()
"""
Explanation: Cross-Validation and scoring methods
In the previous sections and notebooks, we split our dataset into two parts, a training set and a test set. We used the training set to fit our model, and we used the test set to evaluate its generalization performance -- how well it performs on new, unseen data.
<img src="figures/train_test_split.svg" width="100%">
However, often (labeled) data is precious, and this approach lets us use only ~ 3/4 of our data for training. On the other hand, we only ever evaluate our model on 1/4 of the data.
A common way to use more of the data to build a model, but also get a more robust estimate of the generalization performance, is cross-validation.
In cross-validation, the data is split repeatedly into a training and non-overlapping test-sets, with a separate model built for every pair. The test-set scores are then aggregated for a more robust estimate.
The most common way to do cross-validation is k-fold cross-validation, in which the data is first split into k (often 5 or 10) equal-sized folds, and then for each iteration, one of the k folds is used as test data, and the rest as training data:
<img src="figures/cross_validation.svg" width="100%">
This way, each data point will be in the test-set exactly once, and we can use all but a k'th of the data for training.
Let us apply this technique to evaluate the KNeighborsClassifier algorithm on the Iris dataset:
End of explanation
"""
y
"""
Explanation: The labels in iris are sorted, which means that if we split the data as illustrated above, the first fold will only have the label 0 in it, while the last one will only have the label 2:
End of explanation
"""
import numpy as np
rng = np.random.RandomState(0)
permutation = rng.permutation(len(X))
X, y = X[permutation], y[permutation]
print(y)
"""
Explanation: To avoid this problem in evaluation, we first shuffle our data:
End of explanation
"""
k = 5
n_samples = len(X)
fold_size = n_samples // k
scores = []
masks = []
for fold in range(k):
# generate a boolean mask for the test set in this fold
test_mask = np.zeros(n_samples, dtype=bool)
test_mask[fold * fold_size : (fold + 1) * fold_size] = True
# store the mask for visualization
masks.append(test_mask)
# create training and test sets using this mask
X_test, y_test = X[test_mask], y[test_mask]
X_train, y_train = X[~test_mask], y[~test_mask]
# fit the classifier
classifier.fit(X_train, y_train)
# compute the score and record it
scores.append(classifier.score(X_test, y_test))
"""
Explanation: Now implementing cross-validation is easy:
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.matshow(masks)
"""
Explanation: Let's check that our test mask does the right thing:
End of explanation
"""
print(scores)
print(np.mean(scores))
"""
Explanation: And now let's look at the scores we computed:
End of explanation
"""
from sklearn.model_selection import cross_val_score
scores = cross_val_score(classifier, X, y)
print(scores)
print(np.mean(scores))
"""
Explanation: As you can see, there is a rather wide spectrum of scores from 90% correct to 100% correct. If we only did a single split, we might have gotten either answer.
As cross-validation is such a common pattern in machine learning, there are functions to do the above for you with much more flexibility and less code.
The sklearn.model_selection module has all functions related to cross validation. The easiest function is cross_val_score, which takes an estimator and a dataset and does all of the splitting for you:
End of explanation
"""
cross_val_score(classifier, X, y, cv=5)
"""
Explanation: As you can see, the function uses three folds by default. You can change the number of folds using the cv argument:
End of explanation
"""
from sklearn.model_selection import KFold, StratifiedKFold, ShuffleSplit
"""
Explanation: There are also helper objects in the cross-validation module that will generate indices for you for all kinds of different cross-validation methods, including k-fold:
End of explanation
"""
cv = StratifiedKFold(n_splits=5)
for train, test in cv.split(X, y):
print(test)
"""
Explanation: By default, cross_val_score will use StratifiedKFold for classification, which ensures that the class proportions in the dataset are reflected in each fold. If you have a binary classification dataset with 90% of data points belonging to class 0, that would mean that in each fold, 90% of datapoints would belong to class 0.
If you just used KFold cross-validation, you would likely generate folds that contain only class 0.
It is generally a good idea to use StratifiedKFold whenever you do classification.
StratifiedKFold would also remove our need to shuffle iris.
Let's see what kinds of folds it generates on the unshuffled iris dataset.
Each cross-validation class is a generator of sets of training and test indices:
End of explanation
"""
def plot_cv(cv, y):
masks = []
X = np.ones((len(y), 1))
for train, test in cv.split(X, y):
mask = np.zeros(len(y), dtype=bool)
mask[test] = 1
masks.append(mask)
plt.matshow(masks)
plot_cv(StratifiedKFold(n_splits=5), iris.target)
"""
Explanation: As you can see, there are a couple of samples from the beginning, then from the middle, and then from the end, in each of the folds.
This way, the class ratios are preserved. Let's visualize the split:
End of explanation
"""
plot_cv(KFold(n_splits=5), iris.target)
"""
Explanation: For comparison, again the standard KFold, that ignores the labels:
End of explanation
"""
plot_cv(KFold(n_splits=10), iris.target)
"""
Explanation: Keep in mind that increasing the number of folds will give you a larger training dataset, but will lead to more repetitions, and therefore a slower evaluation:
End of explanation
"""
plot_cv(ShuffleSplit(n_splits=5, test_size=.2), iris.target)
"""
Explanation: Another helpful cross-validation generator is ShuffleSplit. This generator simply splits off a random portion of the data repeatedly. This allows the user to specify the number of repetitions and the training set size independently:
End of explanation
"""
plot_cv(ShuffleSplit(n_splits=10, test_size=.2), iris.target)
"""
Explanation: If you want a more robust estimate, you can just increase the number of iterations:
End of explanation
"""
cv = ShuffleSplit(n_splits=5, test_size=.2)
cross_val_score(classifier, X, y, cv=cv)
"""
Explanation: You can use all of these cross-validation generators with the cross_val_score method:
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.2/tutorials/ebv_Av_Rv.ipynb
|
gpl-3.0
|
!pip install -I "phoebe>=2.2,<2.3"
"""
Explanation: Extinction (ebv, Av, & Rv)
Setup
Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
End of explanation
"""
b.add_dataset('lc')
"""
Explanation: Now let's add a light curve so that we can access the relevant parameters.
End of explanation
"""
print(b['ebv'])
print(b['ebv@dataset'])
print(b['ebv@constraint'])
print(b['Av'])
print(b['Rv'])
"""
Explanation: Relevant Parameters
Extinction is parameterized by 3 parameters: ebv (E(B-V)), Av, and Rv. Of these three, two can be provided and the other must be constrained. By default, ebv is the constrained parameter. To change this, see the tutorial on constraints and the b.flip_constraint API docs.
End of explanation
"""
|
psi4/psi4meta
|
download-analysis/conda/2017-07-03-scrape_anacondaorg.ipynb
|
gpl-2.0
|
import re
import requests
import numpy as np
from datetime import date
from pandas import DataFrame
from bs4 import BeautifulSoup
from dateutil.relativedelta import relativedelta
def todatetime(ul_str):
upload = re.compile(r'((?P<year>\d+) years?)?( and )?((?P<month>\d+) months?)?( and )?((?P<day>\d+) days?)?( and )?((?P<hour>\d+) hours?)?( and )?((?P<min>\d+) minutes?)?(.*)ago')
yr = mo = dy = hr = mn = 0
mobj = upload.match(ul_str)
if mobj:
if mobj.group('year'):
yr = int(mobj.group('year'))
if mobj.group('month'):
mo = int(mobj.group('month'))
if mobj.group('day'):
dy = int(mobj.group('day'))
if mobj.group('hour'):
hr = int(mobj.group('hour'))
if mobj.group('min'):
mn = int(mobj.group('min'))
else:
raise ValueError("Unexpected period {!r}".format(ul_str))
delta = relativedelta(years=yr, months=mo, days=dy, hours=hr, minutes=mn)
return date.today() - delta
def parse_name(cell):
name = cell.text.strip().split('/')
if len(name) != 2:
name = cell.text.strip().split('\\')
arch = '{}'.format(name[0].split()[1])
name = '{}'.format(name[1].split('.tar.bz2')[0])
return arch, name
def get_page(package, page):
url = "https://anaconda.org/psi4/{}/files?page={}".format
r = requests.get(url(package, page))
r.raise_for_status()
soup = BeautifulSoup(r.text)
table = soup.find("table", class_="full-width")
downloads, uploaded, platforms, names = [], [], [], []
for row in table.findAll('tr'):
col = row.findAll('td')
#print('COL: ', col)
if len(col) == 8:
downloads.append(int(col[6].text.strip()))
uploaded.append(todatetime(col[4].text.strip()))
platform, name = parse_name(col[3])
platforms.append(platform)
names.append(name)
#print downloads[-1], uploaded[-1], platforms[-1], names[-1]
return downloads, uploaded, platforms, names
def get_df(package):
downloads, uploaded, platforms, names = [], [], [], []
for page in range(1, 15):
dn, up, pf, nm = get_page(package, page)
print(len(nm), end=' ')
downloads.extend(dn)
uploaded.extend(up)
platforms.extend(pf)
names.extend(nm)
if len(nm) != 50:
break
else:
print("Insufficient pages or packages in multiple of 50 which may lead to inflated download counts.")
df = DataFrame(data=np.c_[platforms, names, uploaded, downloads],
columns=['platform', 'name', 'uploaded', 'downloads'])
df['uploaded'] = pd.to_datetime(df['uploaded'])
df.set_index('uploaded', inplace=True, drop=True)
df['downloads'] = df['downloads'].astype(int)
return df
"""
Explanation: Conda and
binstar are changing the packaging world of Python.
Conda made it easy to install re-locatable python binaries that were hard
to build, while binstar provides a "Linux repository-like system"
(or if you are younger than me an AppStore-like system) to host custom binaries.
Taking advantage of that IOOS created a binstar
channel with Met-ocean themed packages for Windows,
Linux and MacOS. Note that, if you are using Red Hat Enterprise Linux or Centos you
should use the rhel6 channel to avoid the
GLIBC problem.
All the conda-recipes are open and kept in a GitHub
repository. (And accepting PRs ;-)
In this post I will not show how to install and configure conda with this channel.
It has been done already here
and
here. Is this post I will scrape
the binstar channel stats to evaluate how the channel is doing.
First, some handy functions to parse the dates and package names, and
to save all the data into a pandas DataFrame.
End of explanation
"""
from requests import HTTPError
from pandas import Panel, read_json
import pandas as pd
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 5000)
json = "https://conda.anaconda.org/psi4/linux-64/repodata.json"
df = read_json(json)
packages = sorted(set(['-'.join(pac.split('-')[:-2]) for pac in df.index]))
packages = [pkg for pkg in packages if pkg]
packages = [u'psi4', u'chemps2', u'dftd3', u'pcmsolver', u'v2rdm_casscf', u'libint', u'erd', u'simint', u'dkh', u'gdma', u'gcp', u'libefp', 'libxc']
dfs = dict()
for pac in packages:
try:
print('\n', pac, ': ', end='')
dfs.update({pac: get_df(pac)})
except HTTPError:
continue
#print(dfs)
"""
Explanation: All the data we need is in the repodata.json file. There isn't an API
to access that via the command line (yet), that is why we need to scrape
it.
End of explanation
"""
def get_plat_total(df):
package = dict()
for plat in ['linux-64', 'osx-64']: #, 'win-32', 'win-64']:
# all time
#sset = df.loc[:].query('platform == "{}"'.format(plat))
# before 1.0 # 5 Jul 2017 - no longer any good b/c I thinned out the pkgs
#sset = df.loc['2016-7-4':].query('platform == "{}"'.format(plat))
# after 1.0
#sset = df.loc[:'2016-7-4'].query('platform == "{}"'.format(plat))
# after 1.1
sset = df.loc[:'2017-5-16'].query('platform == "{}"'.format(plat))
print(sset) # nicely formatted output
total = sset.sum()
package.update({plat: total['downloads']})
return package
packages = dict()
for pac in dfs.keys():
df = dfs[pac]
packages.update({pac: get_plat_total(df)})
for pac in dfs.keys():
print('{:<15}: {:<10} {:<6} {:<10} {:<6} {:<10} {:<6}'.format(pac,
'linux-64', packages[pac]['linux-64'],
'osx-64', packages[pac]['osx-64'],
'total', packages[pac]['linux-64'] + packages[pac]['osx-64']))
df = DataFrame.from_dict(packages).T
df['sum'] = df.T.sum()
df.sort('sum', ascending=False, inplace=True)
df.drop('sum', axis=1, inplace=True)
"""
Explanation: Now let's split the various platforms and compute total number of downloads
for each package.
End of explanation
"""
%matplotlib inline
import seaborn
import matplotlib.pyplot as plt
stride = 19 # 19 x 5 = 95
# stride = len(packages)
kw = dict(kind='bar', stacked=True)
fig, ax = plt.subplots(figsize=(11, 3))
ax = df.ix[:stride].plot(ax=ax, **kw)
# fig, ax = plt.subplots(figsize=(11, 3))
# ax = df.ix[stride:stride*2].plot(ax=ax, **kw)
# fig, ax = plt.subplots(figsize=(11, 3))
# ax = df.ix[stride*2:stride*3].plot(ax=ax, **kw)
# fig, ax = plt.subplots(figsize=(11, 3))
# ax = df.ix[stride*3:stride*4].plot(ax=ax, **kw)
# fig, ax = plt.subplots(figsize=(11, 3))
# ax = df.ix[stride*4:stride*5].plot(ax=ax, **kw)
# df['win'] = df['win-32'] + df['win-64']
# total = df[['linux-64', 'osx-64', 'win']].sum()
total = df[['linux-64', 'osx-64']].sum()
fig, ax = plt.subplots(figsize=(7, 3))
ax = total.plot(ax=ax, kind='bar')
"""
Explanation: And here is the result,
End of explanation
"""
# import pandas as pd
# pd.set_option('display.max_rows', 1500)
# packagesY = dict()
# #dates = pd.date_range('1/1/2016', periods=12)
# #print 'keys', dfs.keys(), dates
# for pac in dfs.keys():
# print '<<< {} >>>'.format(pac)
# df = dfs[pac]
# df.sort_index(inplace=True)
# #print 'df\n', df
# #print 'cols', df.axes
# #df.plot(title=pac)
# df['cumulative_downloads']=df['downloads'].cumsum()
# print df
# df.plot(title=pac, figsize=(15, 8))
"""
Explanation: Right now it is hard to make sense of the data. That is because some
downloads might be a direct download or an indirect download via a package
dependency. Also, our own build system downloads the dependencies when
building new packages or when updating the packages in the channel. One conclusion
that we may take from this is that the Windows packages are as popular as the
Linux packages!
End of explanation
"""
|
sspickle/sci-comp-notebooks
|
P01-Euler.ipynb
|
mit
|
#
# Simple python program to calculate s as a function of t.
# Any line that begins with a '#' is a comment.
# Anything in a line after the '#' is a comment.
#
lam=0.01 # define some variables: lam, dt, s, s0 and t. Set initial values.
dt=1.0
s=s0=100.0
t=0.0
def f_s(s,t): # define a function that describes the rate of change of 's'
return -lam*s
print ('t s') # print a header for the table of data
for i in range(11):
print (t,s) # iterate through 11 steps, starting at 0
ds = f_s(s,t)*dt # compute the change in 's' using the 'rule' that ds/dt = f(s,t)
s = s + ds # update s
t = t + dt # update t
"""
Explanation: The Euler Method
This notebook is a introduction to the Euler Method.
Imagine you have a system described by a "state" s. The idea is that s is the collection of all the variables that are needed to specify the system's condition at some moment in time. If you know s at some time, then you know all you need to know to understand what the system is doing at that time. The idea is that there is some rule that determines the rate of change of s that depends on s, and possibly also on the time like so:
$$\frac{ds}{dt} = f_s(s,t)$$
So.. if we have a rule like this, and we know the state of the system now, Euler's method allows us to estimate the state at a later time. How? Simple.. $f_s(s,t)$ is telling us the rate of change of s itself. So, if a short period of time $\Delta t$ passes what should be the corresponding change in s (called $\Delta s$)?
$$\frac{\Delta s}{\Delta t} \approx \frac{ds}{dt} = f_s(s,t)$$
Solving for $\Delta{s}$ gives:
$$\Delta s \approx f_s(s,t) \Delta t$$
That's the Euler Method! The only thing left is to add $\Delta s$ to the old value of s, and repeat the process many times until you get the value of s at some later time.
$$s_{\rm new} = s_{\rm old} + \Delta s$$
Let's work out a detailed example 'by hand'...
Example: Decay
First.. the worlds simplest example: Radioactive Decay. In radioactive decay the rate of decay is proportional to the number of radioactive nuclei in the sample at any given time.
s = number of nuclei left
$$\frac{ds}{dt} = f_s(s,t) = - \lambda s$$
Let's say we start with s=100 nuclei and $\lambda=0.01\,\mathrm{s}^{-1}$ (1 decay per 100 seconds per source nucleus). How many nuclei will be left after 10 seconds?
$\frac{ds}{dt}$ will be $-\lambda s = -(0.01\,\mathrm{s}^{-1})(100\ \mathrm{nuclei}) = -1\ \mathrm{nuclei/s}$
$s + \Delta s = 100\ \mathrm{nuclei} + (-1\ \mathrm{nuclei/s})(10\ \mathrm{s}) = 90$ nuclei remaining.
Below is the simplest python program that implements this calculation and prints the results
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
"""
Explanation: Now let's add a plot. Turn on inline plotting and import numpy and matplotlib:
End of explanation
"""
slist=[]
tlist=[]
lam=0.01
dt=1.0
s=s0=100.0
t=0.0
tlist.append(t)
slist.append(s)
print ('t s')
for i in range(11):
s += f_s(s,t)*dt
t += dt
tlist.append(t)
slist.append(s)
#plot(tlist, slist, 'b.', tlist, 100.0*exp(-lam*array(tlist)))
print ("tlist=", tlist)
print ("slist=", slist)
exact = s0*np.exp(-lam*np.array(tlist))
print ("exact", exact)
"""
Explanation: Good! Next collect some lists of data (slist and tlist) and use the "plot" function to display the values graphically.
End of explanation
"""
pl.title('Decay Results')
pl.xlabel('time (s)')
pl.ylabel('n (nuclei)')
pl.plot(tlist, slist, 'b.', tlist, exact, 'r-')
"""
Explanation: Now, during the "in class" work time this week we'll pretty this up and compare to our analytical solution.
End of explanation
"""
#
# Here is the raw data for the position of the muffin cup as a function of time. Use the "split" function to break it into
# a list of (possibly empty) strings.
#
data = """0.000000000E0 -2.688162330E0
3.336670003E-2 -4.301059729E0
6.673340007E-2 -5.376324661E0
1.001001001E-1 -6.989222059E0
1.334668001E-1 -1.129028179E1
1.668335002E-1 -1.451607658E1
2.002002002E-1 -2.043003371E1
2.335669002E-1 -2.526872591E1
2.669336003E-1 -3.118268303E1
3.003003003E-1 -3.870953756E1
3.336670003E-1 -4.623639208E1
3.670337004E-1 -5.430087907E1
4.004004004E-1 -6.236536606E1
4.337671004E-1 -7.150511799E1
4.671338005E-1 -8.010723744E1
5.005005005E-1 -8.924698937E1
5.338672005E-1 -9.892437376E1
5.672339006E-1 -1.080641257E2
6.006006006E-1 -1.177415101E2
6.339673006E-1 -1.274188945E2
6.673340007E-1 -1.370962788E2
7.007007007E-1 -1.467736632E2
7.340674007E-1 -1.575263126E2
7.674341008E-1 -1.672036969E2
8.008008008E-1 -1.768810813E2
8.341675008E-1 -1.865584657E2
8.675342009E-1 -1.973111150E2
9.009009009E-1 -2.075261319E2
9.342676009E-1 -2.182787812E2
9.676343010E-1 -2.284937981E2
""".splitlines() # split this string on the "newline" character.
print("We have", len(data), "data points.")
#
# Here we'll take the list of strings defined above and break it into actual numbers in reasonable units.
#
tlist = []
ylist = []
for s in data:
t,y = s.split() # break string in two
t=float(t) # convert time to float
y=float(y)/100.0 # convert distanct (in meters) to float
tlist.append(t)
ylist.append(y)
print ("tlist=",tlist)
print ("ylist=",ylist)
pl.plot(tlist, ylist)
pl.title("raw data")
pl.xlabel("time(s)")
pl.ylabel("y(m)")
pl.grid()
vlist = [] # Velocity list (computed velocities from experimental data)
tvlist = [] # time list (times for corresponding velocities)
for i in range(1,len(tlist)):
dy=ylist[i]-ylist[i-1]
dt=tlist[i]-tlist[i-1]
vlist.append(dy/dt)
tvlist.append((tlist[i]+tlist[i-1])/2.0)
pl.plot(tvlist,vlist,'g.')
pl.title("Velocity graph")
pl.xlabel("time(s)")
pl.ylabel("$v_y$ (m/s)")
pl.grid()
m=0.0035 # kg
g=9.8 # m/s^2
b=0.001 # total guess, need to improve
v=0.0 # start with zero velocity
dt = (tlist[-1]-tlist[0])/(len(tlist)-1) # time per frame in original video
t=0.0
vclist = [v]
tclist = [t]
def deriv(v, t):
return b*v**2/m - g
for i in range(len(tlist)):
dv = deriv(v,t)*dt
v += dv
t += dt
vclist.append(v)
tclist.append(t)
pl.title("Comparison of experimental and drag model")
pl.xlabel("time(s)")
pl.ylabel("velocity (m/s)")
pl.plot(tclist, vclist, 'r-',tvlist,vlist,'g.')
"""
Explanation: Below are two project options.
You should be aware that for every project in this course you can always create your own
project studying your own favorite topic so long as the project uses the same computational
approach that we're exploring in a given week (e.g., the Euler Algorithm, etc.).
Project 1, version A: The Euler Method
Use the Euler method to estimate parameters of a free fall model.
Using data from this slide: https://physlets.org/tracker/download/air_resistance.pdf
And the following model:
$$ +b v^2 - m g = m \frac{dv}{dt}$$
Assuming the mass is $m\approx 3.5\,{\rm g}$ and $g\approx 9.8\,{\rm m/s^2}$, estimate the value of the parameter $b$.
Below you will find most of the code already worked out as a detailed example. You need to read the code and understand what it's doing. If you have questions, ASK!
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/2ef6921dc0a9b8045508fcba2760290e/plot_resample.ipynb
|
bsd-3-clause
|
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
"""
Explanation: Resampling data
When performing experiments where timing is critical, a signal with a high
sampling rate is desired. However, having a signal with a much higher sampling
rate than is necessary needlessly consumes memory and slows down computations
operating on the data.
This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold
reduction in data size, at the cost of an equal loss of temporal resolution.
End of explanation
"""
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data()
"""
Explanation: Setting up data paths and loading raw data (skip some data for speed)
End of explanation
"""
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.copy().resample(100, npad='auto')
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))
n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
'-o', color='red')
plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()
"""
Explanation: Since downsampling reduces the timing precision of events, we recommend
first extracting epochs and downsampling the Epochs object:
End of explanation
"""
# Resample to 300 Hz
raw_resampled = raw.copy().resample(300, npad='auto')
"""
Explanation: When resampling epochs is unwanted or impossible, for example when the data
doesn't fit into memory or your analysis pipeline doesn't involve epochs at
all, the alternative approach is to resample the continuous data. This
can only be done on loaded or pre-loaded data.
End of explanation
"""
print('Number of events before resampling:', len(mne.find_events(raw)))
# Resample to 100 Hz (suppress the warning that would be emitted)
raw_resampled = raw.copy().resample(100, npad='auto', verbose='error')
print('Number of events after resampling:',
len(mne.find_events(raw_resampled)))
# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.copy().resample(
100, npad='auto', events=events)
print('Number of events after resampling:', len(events_resampled))
"""
Explanation: Because resampling also affects the stim channels, some trigger onsets might
be lost in this case. While MNE attempts to downsample the stim channels in
an intelligent manner to avoid this, the recommended approach is to find
events on the original data before downsampling.
End of explanation
"""
|
luiscruz/udacity_data_analyst
|
P02/Project2_Investigate_a_Dataset_NYC.ipynb
|
mit
|
print ggplot(turnstile_weather, aes(x='ENTRIESn_hourly')) +\
geom_histogram(binwidth=1000,position="identity") +\
scale_x_continuous(breaks=range(0, 60001, 10000), labels = range(0, 60001, 10000))+\
facet_grid("rain")+\
ggtitle('Distribution of ENTRIESn_hourly in non-rainy days (0.0) and rainy days(1.0)')
"""
Explanation: Analyzing the NYC Subway Dataset
Section 1. Statistical Test
1.1 Which statistical test did you use to analyze the NYC subway data? Did you use a one-tail or a two-tail P value? What is the null hypothesis? What is your p-critical value?
Given random draws x from the population of people that ride the subway when it rains and y from the population of people that ride the subway when it does not rain, the standard two-tailed hypotheses are as follows:
$H0: P(x \gt y) = 0.5$
$H1: P(x \gt y) \neq 0.5$
The test used is Mann-Whitney U-statistic, and a two-tail P value is used.
The p-critical value is 0.05.
1.2 Why is this statistical test applicable to the dataset? In particular, consider the assumptions that the test is making about the distribution of ridership in the two samples.
Sample size is greater than 20
Distribution of samples is not normal (see histograms)
Samples are independent
End of explanation
"""
### YOUR CODE HERE ###
df_with_rain = turnstile_weather[turnstile_weather['rain']==1]
df_without_rain = turnstile_weather[turnstile_weather['rain']==0]
with_rain_mean = df_with_rain['ENTRIESn_hourly'].mean()
without_rain_mean = df_without_rain['ENTRIESn_hourly'].mean()
U, p = scipy.stats.mannwhitneyu(df_with_rain['ENTRIESn_hourly'], df_without_rain['ENTRIESn_hourly'])
print "mean_with_rain=%f mean_without_rain=%f p-value=%.8f" %(with_rain_mean, without_rain_mean, p*2)
"""
Explanation: 1.3 What results did you get from this statistical test? These should include the following numerical values: p-values, as well as the means for each of the two samples under test.
End of explanation
"""
print "Descriptive statistics for the ridership in rainy days"
df_with_rain['ENTRIESn_hourly'].describe()
print "Descriptive statistics for the ridership in non-rainy days"
df_without_rain['ENTRIESn_hourly'].describe()
"""
Explanation: 1.4 What is the significance and interpretation of these results?
The p-value is below the significance level ($\alpha = 0.05$). Thus, the results obtained reject the null hypothesis at a significance level of 0.05. This means that the number of passengers on rainy days is different from the number observed on non-rainy days.
The following statistics support our test:
End of explanation
"""
def linear_regression(features, values):
"""
Perform linear regression given a data set with an arbitrary number of features.
This can be the same code as in the lesson #3 exercise.
"""
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(features, values)
return regr.intercept_, regr.coef_
def predictions(dataframe):
'''
The NYC turnstile data is stored in a pandas dataframe called weather_turnstile.
Using the information stored in the dataframe, let's predict the ridership of
the NYC subway using linear regression with ordinary least squares.
You can download the complete turnstile weather dataframe here:
https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
Your prediction should have a R^2 value of 0.40 or better.
You need to experiment using various input features contained in the dataframe.
We recommend that you don't use the EXITSn_hourly feature as an input to the
linear model because we cannot use it as a predictor: we cannot use exits
counts as a way to predict entry counts.
Note: Due to the memory and CPU limitation of our Amazon EC2 instance, we will
give you a random subet (~10%) of the data contained in
turnstile_data_master_with_weather.csv. You are encouraged to experiment with
this exercise on your own computer, locally. If you do, you may want to complete Exercise
8 using gradient descent, or limit your number of features to 10 or so, since ordinary
least squares can be very slow for a large number of features.
If you receive a "server has encountered an error" message, that means you are
hitting the 30-second limit that's placed on running your program. Try using a
smaller number of features.
'''
################################ MODIFY THIS SECTION #####################################
# Select features. You should modify this section to try different features! #
# We've selected rain, precipi, Hour, meantempi, and UNIT (as a dummy) to start you off. #
# See this page for more info about dummy variables: #
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html #
##########################################################################################
features = dataframe[['rain', 'precipi', 'Hour', 'meantempi', 'fog']]
dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')
features = features.join(dummy_units)
# Values
values = dataframe['ENTRIESn_hourly']
# Perform linear regression
intercept, params = linear_regression(features, values)
predictions = intercept + numpy.dot(features, params)
return predictions, intercept, params
predicted, intercept, params = predictions(turnstile_weather)
values = turnstile_weather['ENTRIESn_hourly']
(turnstile_weather['ENTRIESn_hourly'] - predicted).hist(bins=20)
print "R2 Score=%f"%r2_score(values, predicted)
"""
Explanation: Section 2. Linear Regression
2.1 What approach did you use to compute the coefficients theta and produce prediction for ENTRIESn_hourly in your regression model:
OLS using Scikit Learn
End of explanation
"""
print "Correlation analysis"
turnstile_weather.corr()['ENTRIESn_hourly'].sort_values(inplace=False)
# plt.rcParams['figure.figsize'] = (12.0, 3.0)
# dtypes = turnstile_weather.dtypes
# for column in turnstile_weather.columns:
# if dtypes[column] in ['int64', 'float64']:
# plt.figure()
# turnstile_weather[column].hist(bins=20)
# #turnstile_weather.plot(kind='kde', x=column)
# plt.title(column)
# plt.rcParams['figure.figsize'] = (16.0, 8.0)
"""
Explanation: 2.2 What features (input variables) did you use in your model? Did you use any dummy variables as part of your features?
I have used rain, precipi, Hour, meantempi and UNIT. UNIT was transformed into dummy variables.
2.3 Why did you select these features in your model? We are looking for specific reasons that lead you to believe that
the selected features will contribute to the predictive power of your model.
Your reasons might be based on intuition. For example, response for fog might be: “I decided to use fog because I thought that when it is very foggy outside people might decide to use the subway more often.”
Your reasons might also be based on data exploration and experimentation, for example: “I used feature X because as soon as I included it in my model, it drastically improved my R2 value.”
We know that weather, namely precipitation, affects the $\mu_{passengers}$. Thus I have included rain, precipi, meantempi and fog. From the correlation analysis below we can also see that Hour is the most correlated valid feature. For this reason Hour was also included in the input features.
End of explanation
"""
features=['rain', 'precipi', 'Hour', 'meantempi', 'fog']
print "== Non-dummy features coefficients =="
for i in range(5):
output_str = ("%s:"%features[i]).ljust(12)
output_str += "%.3f"%(params[i])
print output_str
"""
Explanation: 2.4 What are the parameters (also known as "coefficients" or "weights") of the non-dummy features in your linear regression model?
End of explanation
"""
r_squared = 1 - ((values-predicted)**2).sum()/((values-values.mean())**2).sum()
assert(r_squared == r2_score(values, predicted))
print "R2 Score=%f"%r_squared
"""
Explanation: 2.5 What is your model’s R2 (coefficients of determination) value?
End of explanation
"""
print ggplot(aes(x='ENTRIESn_hourly', fill='rain'), data=turnstile_weather) +\
geom_histogram(binwidth=1000) +\
ggtitle('Ridership per hour distribution for rainy and non-rainy days') +\
ylab('Number of tuples')
print "ENTRIESn_hourly max value: %d"%turnstile_weather['ENTRIESn_hourly'].max()
"""
Explanation: 2.6 What does this R2 value mean for the goodness of fit for your regression model? Do you think this linear model to predict ridership is appropriate for this dataset, given this R2 value?
The coefficient of determination, $R^2$, tells us how much of the variance of the dependent variable (entries per hour) is explained by the predictor features.
When $R^2$ is close to 1, the model fits very well, while when it is close to 0, the model does not fit at all.
We have an $R^2$ of 0.46, which means that 46% of the variance of the data is explained by the regression model.
In addition, we should be evaluating our model with data that was not used to train the model. Even if we get a good score, our model might be overfitting.
If we look at our coefficients we can see that rain and meantempi have a negative impact on entries per hour, while precipi, Hour and fog have a positive impact.
In other words, within a model that explains 46% of the variance, rain enters with a negative coefficient.
Section 3. Visualization
Please include two visualizations that show the relationships between two or more variables in the NYC subway data.
Remember to add appropriate titles and axes labels to your plots. Also, please add a short description below each figure commenting on the key insights depicted in the figure.
3.1 One visualization should contain two histograms: one of ENTRIESn_hourly for rainy days and one of ENTRIESn_hourly for non-rainy days.
You can combine the two histograms in a single plot or you can use two separate plots.
If you decide to use two separate plots for the two histograms, please ensure that the x-axis limits for both of the plots are identical. It is much easier to compare the two in that case.
For the histograms, you should have intervals representing the volume of ridership (value of ENTRIESn_hourly) on the x-axis and the frequency of occurrence on the y-axis. For example, each interval (along the x-axis), the height of the bar for this interval will represent the number of records (rows in our data) that have ENTRIESn_hourly that falls in this interval.
Remember to increase the number of bins in the histogram (by having larger number of bars). The default bin width is not sufficient to capture the variability in the two samples.
R:
The following visualization has 2 histograms combined in a single plot. The histogram in red shows the ridership per hour distribution for non-rainy days, while the histogram in blue shows it for rainy days. We can see that non-rainy days have bigger bars for ENTRIESn_hourly below 10000. This doesn't mean rainy days have fewer passengers; it just means that we have less data for rainy days, which is natural since there are fewer rainy days.
End of explanation
"""
print ggplot(aes(x='ENTRIESn_hourly', fill='rain'), data=turnstile_weather) +\
geom_histogram(binwidth=100) +\
xlim(0, 10000)+\
ggtitle('Ridership per hour distribution for rainy and non-rainy days limited to 10000') +\
ylab('Number of tuples')
# print ggplot(aes(x='ENTRIESn_hourly', color='rain'), data=turnstile_weather) +\
# geom_density() +\
# ggtitle('Ridership per hour distribution for rainy and non-rainy days limited to 10000') +\
# ylab('Number of tuples')
"""
Explanation: Although the maximum value of ENTRIESn_hourly is above 50000, from the histogram we see that most values are below 10000. Thus, let's generate a histogram limited to 10000 entries.
End of explanation
"""
print ggplot(turnstile_weather, aes(x='Hour', y='ENTRIESn_hourly'))+geom_bar(stat = "summary", fun_y=numpy.mean, fill='lightblue')+ggtitle('Average ridership by time-of-day')
"""
Explanation: 3.2 One visualization can be more freeform. You should feel free to implement something that we discussed in class (e.g., scatter plots, line plots) or attempt to implement something more advanced if you'd like. Some suggestions are:
Ridership by time-of-day
Ridership by day-of-week
R:
The following plot shows the average number of passengers per hour in our dataset. We can see that, on average, 8pm, 12pm, and 4pm are the times of day with the most passengers.
End of explanation
"""
print pandas.to_datetime(turnstile_weather['DATEn']).describe()
"""
Explanation: Section 4. Conclusion
Please address the following questions in detail. Your answers should be 1-2 paragraphs long.
4.1 From your analysis and interpretation of the data, do more people ride the NYC subway when it is raining or when it is not raining?
The number of people that ride the NYC subway differs between rainy and non-rainy days, but the analysis does not make clear which kind of day has more ridership.
4.2 What analyses lead you to this conclusion? You should use results from both your statistical tests and your linear regression to support your analysis.
The Mann-Whitney U-statistic was able to reject the null hypothesis with a significance level of 0.05.
When we look at the distributions, we see that the maximum value of ridership per hour is much higher on rainy days (51839 against 43199).
The histograms do not give a good visual comparison of the distributions, since there are more tuples for non-rainy days. Some normalization would help further analysis (a sketch of a normalized comparison is appended after this section).
Nevertheless, when we look at our linear regression model with $R^2=0.46$, the coefficient for rain is negative (-39.307), which means that ridership is negatively associated with rain. This might be due to correlation or causality between rain and other features; e.g., rain might be correlated with fog, which might also affect ridership.
Section 5. Reflection
Please address the following questions in detail. Your answers should be 1-2 paragraphs long.
5.1 Please discuss potential shortcomings of the methods of your analysis, including: Dataset, Analysis, such as the linear regression model or statistical test.
Regarding the linear regression, this method is not robust against correlated features. The use of correlated features might be reducing the quality of our model and conclusions.
Although our test rejected the null hypothesis, we can't assume that there is a causal relationship between rain and ridership. There is the possibility of another factor affecting both variables.
End of explanation
"""
|
tgsmith61591/skutil
|
doc/examples/pipeline/skutil grid demo.ipynb
|
bsd-3-clause
|
from sklearn.pipeline import Pipeline
from skutil.preprocessing import BoxCoxTransformer, SelectiveScaler
from skutil.decomposition import SelectivePCA
# MulticollinearityFilterer is used in the pipeline below; it is assumed here to
# live in skutil.feature_selection, alongside NearZeroVarianceFilterer (imported later)
from skutil.feature_selection import MulticollinearityFilterer
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
# build a pipeline
pipe = Pipeline([
('collinearity', MulticollinearityFilterer(threshold=0.85)),
('scaler' , SelectiveScaler()),
('boxcox' , BoxCoxTransformer()),
('pca' , SelectivePCA(n_components=0.9)),
('model' , RandomForestClassifier())
])
# fit the pipe, report scores
pipe.fit(X_train, y_train)
# report scores
print 'Train RF accuracy: %.5f' % accuracy_score(y_train, pipe.predict(X_train))
print 'Test RF accuracy: %.5f' % accuracy_score(y_test, pipe.predict(X_test))
"""
Explanation: Build a simple Pipeline
End of explanation
"""
from skutil.grid_search import RandomizedSearchCV
from sklearn.cross_validation import KFold
from sklearn.preprocessing import StandardScaler, RobustScaler
from skutil.feature_selection import NearZeroVarianceFilterer
from scipy.stats import randint, uniform
# default CV does not shuffle, so we define our own
custom_cv = KFold(n=y_train.shape[0], n_folds=5, shuffle=True, random_state=42)
# build a pipeline -- let's also add a NearZeroVarianceFilterer prior to PCA
pipe = Pipeline([
('collinearity', MulticollinearityFilterer(threshold=0.85)),
('scaler' , SelectiveScaler()),
('boxcox' , BoxCoxTransformer()),
('filterer' , NearZeroVarianceFilterer()),
('pca' , SelectivePCA(n_components=0.9)),
('model' , RandomForestClassifier(n_jobs=-1))
])
# let's define a set of hyper-parameters over which to search
hp = {
'collinearity__threshold' : uniform(loc=.8, scale=.15),
'collinearity__method' : ['pearson','kendall','spearman'],
'scaler__scaler' : [StandardScaler(), RobustScaler()],
'filterer__threshold' : uniform(loc=1e-6, scale=0.005),
'pca__n_components' : uniform(loc=.75, scale=.2),
'pca__whiten' : [True, False],
'model__n_estimators' : randint(5,100),
'model__max_depth' : randint(2,25),
'model__min_samples_leaf' : randint(1,15),
'model__max_features' : uniform(loc=.5, scale=.5),
'model__max_leaf_nodes' : randint(10,75)
}
# define the gridsearch
search = RandomizedSearchCV(pipe, hp,
n_iter=50,
scoring='accuracy',
cv=custom_cv,
random_state=42)
# fit the search
search.fit(X_train, y_train)
# report scores
print 'Train RF accuracy: %.5f' % accuracy_score(y_train, search.predict(X_train))
print 'Test RF accuracy: %.5f' % accuracy_score(y_test, search.predict(X_test))
"""
Explanation: The performance isn't bad. The training accuracy is phenomenal, but the validation accuracy is sub-par. Plus, there's quite a lot of variance in the model, isn't there? Let's try to improve our performance as well as reduce the variability (while sacrificing some bias, unfortunately).
Can we make this better with a gridsearch?
Beware, this grid can be a lot to handle for an older or weaker machine
End of explanation
"""
search.best_params_
"""
Explanation: This is much better! We've dramatically reduced the variance in our model, but we've taken a slight hit in terms of bias. With different models, or even an ensemble of different models (an ensemble of ensembles?), we could probably achieve an even better score.
It's also important to note that we were relatively cavalier in our preprocessing... in a real-world situation, you'd check each step and make sure the way we're transforming our data makes sense.
Finally, note that the skutil grid search API differs slightly from the sklearn one... in sklearn, we can call search.best_estimator_.predict, however when using SelectiveMixin transformers, names may be internally altered by the grid search API for support with sklearn cross_validation. Thus, in skutil, use search.predict instead.
Here are the best parameters for the grid:
End of explanation
"""
from sklearn.externals import joblib
# write the model
joblib.dump(search, 'final_model.pkl', compress=3)
"""
Explanation: Model persistence
Once you get to a point where you're happy with your model, write it to disk:
End of explanation
"""
from __future__ import print_function
# load the model
final_model = joblib.load('final_model.pkl')
# load your data
# new_data = pd.read_csv('...')
# ... any other pre-processing you may have done outside of the pipeline
# here's our example data
new_data = X
# make predictions
predictions = final_model.predict(new_data)
# view the top few
print(predictions[:5])
# view the performance (we can do this because we have the ground truth)
print(accuracy_score(iris.target, predictions))
# disk cleanup for git
!rm final_model.pkl
"""
Explanation: Making predictions from a persistent model
When new data needs to be predicted on (for the sake of example here, we'll use iris, but you wouldn't really apply the same model to in-sample data), read your model back and make the predictions using .predict(new_data)
End of explanation
"""
|
tpin3694/tpin3694.github.io
|
machine-learning/multinomial_naive_bayes_classifier.ipynb
|
mit
|
# Load libraries
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
"""
Explanation: Title: Multinomial Naive Bayes Classifier
Slug: multinomial_naive_bayes_classifier
Summary: How to train a Multinomial naive bayes classifer in Scikit-Learn
Date: 2017-09-22 12:00
Category: Machine Learning
Tags: Naive Bayes
Authors: Chris Albon
Multinomial naive Bayes works similarly to Gaussian naive Bayes, but the features are assumed to be multinomially distributed. In practice, this means that this classifier is commonly used when we have discrete data (e.g. movie ratings ranging from 1 to 5).
Preliminaries
End of explanation
"""
# Create text
text_data = np.array(['I love Brazil. Brazil!',
'Brazil is best',
'Germany beats both'])
"""
Explanation: Create Text Data
End of explanation
"""
# Create bag of words
count = CountVectorizer()
bag_of_words = count.fit_transform(text_data)
# Create feature matrix
X = bag_of_words.toarray()
"""
Explanation: Create Bag Of Words
End of explanation
"""
# Create target vector
y = np.array([0,0,1])
"""
Explanation: Create Target Vector
End of explanation
"""
# Create multinomial naive Bayes object with prior probabilities of each class
clf = MultinomialNB(class_prior=[0.25, 0.5])
# Train model
model = clf.fit(X, y)
"""
Explanation: Train Multinomial Naive Bayes Classifier
End of explanation
"""
# Create new observation
new_observation = [[0, 0, 0, 1, 0, 1, 0]]
"""
Explanation: Create Previously Unseen Observation
End of explanation
"""
# Predict new observation's class
model.predict(new_observation)
"""
Explanation: Predict Observation's Class
End of explanation
"""
|
catalystcomputing/DSIoT-Python-sessions
|
Session1/code/Pandas.ipynb
|
apache-2.0
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""
Explanation: Pandas is an open source library tailored for data manipulation, data analysis, and data visualization. Written for Python, it provides high-performance, robust methods and flexible data structures, and it aims to be the cornerstone for doing practical, down-to-earth data analysis in Python. Its focus on data rather than on programming makes pandas an ideal tool for scientific computing in Python.
In this series, we'll be processing, cleaning, slicing and modeling data, exploring the basics of the Pandas module and showing concepts such as Series and DataFrames. We'll read two CSV files (the first a list of the Titanic's passengers, and the second a GDP table), explore the data, and organize the results of the analysis into a form suitable for plotting.
Before we begin, note that you won't be using only Pandas: you will also need libraries such as matplotlib and numpy.
<hr style="border-width: 5px;">
Throughout this chapter, we'll be using:
End of explanation
"""
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
s
a = np.array(['a', 'b', 'c', 'd', 'e'])
pd.Series(a)
labels = ['a', 'b', 'c', 'd', 'e']
s = pd.Series(np.random.randn(5), index=labels)
s
"""
Explanation: pandas provides a set of data structures which include Series and DataFrames. A few examples of Series.
End of explanation
"""
s[["a","b"]]
s["b"]
"""
Explanation: pandas allows access to its elements using labels:
End of explanation
"""
s[:3]
"""
Explanation: and slicing
End of explanation
"""
s = pd.Series({'a': 1, 'b': 2, 'c': 3})
s
"""
Explanation: Series can be initialized directly from dictionaries
End of explanation
"""
df = pd.DataFrame({'a': np.array([1,4,7]),
'b': np.array([2,5,8]),
'c': np.array([3,6,9])})
df
df = pd.DataFrame({'Stocks': ["AAPL","CA","CTXS","FIS","MA"],
'Values': [126.17,31.85,65.38,64.08,88.72]})
df
"""
Explanation: DataFrame
End of explanation
"""
df[df["Values"]>65]
"""
Explanation: You can also access all of the values in a column meeting a certain criterion.
End of explanation
"""
df = df.append({"Stocks":"GOOG", "Values":523.53}, ignore_index=True)
df
"""
Explanation: We can add new values:
End of explanation
"""
Stocks = df["Stocks"]
Stocks
df.Stocks
"""
Explanation: If we wish to access columns, we can use two methods:
End of explanation
"""
path = "data/titanic_data.csv"
df = pd.read_csv(path)
df
Class1_passengers = len(df[df.Pclass == 1])
Class1_passengers
Class3_passengers = len(df[df.Pclass == 3])
Class3_passengers
survived = df[df.Survived == 1]
survived_ages = survived["Age"]
not_survived = df[df.Survived == 0]
not_survived_ages = not_survived["Age"]
"""
Explanation: We can easily import the data that we wish to analyze by reading from a CSV file. From there, we can manipulate the data by columns, create new columns, and even base the new columns on other column data. In the following example we'll be using data filtered from a sample passenger manifest of the Titanic. https://github.com/richierichrawr/DataSet_Survivors
End of explanation
"""
survived_ages = survived_ages.dropna()
not_survived_ages = not_survived_ages.dropna()
"""
Explanation: We can drop NaN values
End of explanation
"""
not_survived_ages.sum()/float(len(not_survived_ages))
survived_ages.sum()/float(len(survived_ages))
"""
Explanation: and obtain the average age
End of explanation
"""
survived.groupby("Pclass")["Survived"].sum().plot(kind="bar")
"""
Explanation: Survivors by class
End of explanation
"""
titanic = df[["Survived", "Pclass", "Sex", "Age"]]
titanic[(titanic.Pclass == 1) & (titanic.Sex == "male") & (titanic.Survived == 1) & (titanic.Age > 40)]
"""
Explanation: First class survivors with age > 40
End of explanation
"""
titanic[(titanic.Pclass == 3) & (titanic.Sex == "male") & (titanic.Survived == 1) & (titanic.Age > 40)]
"""
Explanation: Third class survivors with age > 40
End of explanation
"""
path = "data/gdp.csv"
gdp = pd.read_csv(path)
gdp
gdp_data = gdp[["2013","Country name"]].dropna()
gdp_data
plt.scatter(gdp_data.index, gdp_data['2013'])
"""
Explanation: GDP table
End of explanation
"""
gdp_data[gdp_data["2013"] > 50000].sort()
"""
Explanation: Richest Countries
End of explanation
"""
gdp_data[gdp_data["2013"] < 1500].sort()
"""
Explanation: Poorest Countries
End of explanation
"""
|
bbglab/adventofcode
|
2015/ferran/day7.ipynb
|
mit
|
binary_command = {'NOT': '~', 'AND': '&', 'OR': '|', 'LSHIFT': '<<', 'RSHIFT': '>>'}
operators = binary_command.values()
import csv
def translate(l):
return [binary_command[a] if a in binary_command else a for a in l]
def display(input_file):
"""produce a dict mapping variables to expressions"""
commands = []
with open(input_file, 'rt') as f_input:
csv_reader = csv.reader(f_input, delimiter=' ')
for line in csv_reader:
commands.append((line[-1], ' '.join(list(translate(line[:-2])))))
return dict(commands)
import re
def extract_variables(expr):
varbls = []
regex_pattern = '\s|\\)|\\('
l = re.split(regex_pattern, expr)
for a in l:
if (a not in operators) and (not a.isnumeric()) and (a != ''):
varbls.append(a)
return set(varbls)
def create_instance(wire):
exec_python = commands[wire]
pending = extract_variables(commands[wire])
count = 0
while pending and (count < 200):
s = pending.pop()
expr = commands[s]
exec_python = re.sub('({0})'.format(s), '( {0} )'.format(expr), exec_python)
pending = pending.union(extract_variables(exec_python))
count += 1
return wire + ' = ' + exec_python
def evaluate(var):
instance = create_instance(var)
exec(instance)
return np.uint16(locals()[var])
"""
Explanation: Day 7
Day 7.1
Approach 1: Create a single expression by recursive substitution, then evaluate!
End of explanation
"""
commands = display('inputs/input7.test.txt')
def test():
assert(evaluate('d') == 72)
assert(evaluate('e') == 507)
assert(evaluate('f') == 492)
assert(evaluate('g') == 114)
assert(evaluate('h') == 65412)
assert(evaluate('i') == 65079)
assert(evaluate('x') == 123)
assert(evaluate('y') == 456)
test()
"""
Explanation: Test
End of explanation
"""
import numpy as np
def RSHIFT(a, b):
result = np.uint16(a) >> int(b)
return int(result)
def LSHIFT(a, b):
result = np.uint16(a) << int(b)
return int(result)
def OR(a, b):
result = np.uint16(a) | np.uint16(b)
return int(result)
def AND(a, b):
result = np.uint16(a) & np.uint16(b)
return int(result)
def NOT(a):
result = ~ np.uint16(a)
return int(result)
import csv
def display(input_file):
"""produce a dict mapping variables to expressions"""
commands = []
with open(input_file, 'rt') as f_input:
csv_reader = csv.reader(f_input, delimiter=' ')
for line in csv_reader:
commands.append((line[-1], line[:-2]))
return dict(commands)
def evaluate(wire):
known = {}
while wire not in known:
if wire in known:
break
for k, v in commands.items():
if (len(v) == 1) and (v[0].isnumeric()) and (k not in known):
known[k] = int(v[0])
elif (len(v) == 1) and (v[0] in known) and (k not in known):
known[k] = known[v[0]]
elif ('AND' in v) and (v[0] in known) and (v[2] in known):
known[k] = AND(known[v[0]], known[v[2]])
elif ('AND' in v) and (v[0].isnumeric()) and (v[2] in known):
known[k] = AND(int(v[0]), known[v[2]])
elif ('AND' in v) and (v[0] in known) and (v[2].isnumeric()):
known[k] = AND(known[v[0]], int(v[2]))
elif ('OR' in v) and (v[0] in known) and (v[2] in known):
known[k] = OR(known[v[0]], known[v[2]])
elif ('OR' in v) and (v[0].isnumeric()) and (v[2] in known):
known[k] = OR(int(v[0]), known[v[2]])
elif ('OR' in v) and (v[0] in known) and (v[2].isnumeric()):
known[k] = OR(known[v[0]], int(v[2]))
elif ('LSHIFT' in v) and (v[0] in known):
known[k] = LSHIFT(known[v[0]], v[2])
elif ('RSHIFT' in v) and (v[0] in known):
known[k] = RSHIFT(known[v[0]], v[2])
elif ('NOT' in v) and (v[1] in known):
known[k] = NOT(known[v[1]])
return known[wire]
"""
Explanation: This approach seems correct, but it creates huge expressions along the way that become harder and harder to parse, so it takes a very long time to reach a final expression that wraps up all the computations. Two ideas to try next: i) concurrent evaluation of expressions; ii) define lazy variables/functions that collect all the dependencies of the circuit and start firing upon request.
Approach 2: Concurrent evaluation from known variables.
The solution provided hereto owes credit to this source: https://www.reddit.com/r/adventofcode/comments/5id6w0/2015_day_7_part_1_python_wrong_answer/
End of explanation
"""
commands = display('inputs/input7.test1.txt')
commands
evaluate('a')
"""
Explanation: Test 0
End of explanation
"""
commands = display('inputs/input7.test2.txt')
commands
test()
"""
Explanation: Test 1
End of explanation
"""
commands = display('inputs/input7.txt')
evaluate('a')
"""
Explanation: Solution
End of explanation
"""
import csv
import numpy as np
def display(input_file):
"""produce a dict mapping variables to expressions"""
commands = []
with open(input_file, 'rt') as f_input:
csv_reader = csv.reader(f_input, delimiter=' ')
for line in csv_reader:
commands.append((line[-1], line[:-2]))
return dict(commands)
class LazyVar(object):
def __init__(self, func):
self.func = func
self.value = None
def __call__(self):
if self.value is None:
self.value = self.func()
return self.value
binary_command = {'NOT': '~', 'AND': '&', 'OR': '|', 'LSHIFT': '<<', 'RSHIFT': '>>'}
def translate(l):
translated = []
for a in l:
if a in binary_command:
b = binary_command[a]
elif a.isnumeric():
b = 'np.uint16({})'.format(a)
else:
b = '{}.func()'.format('var_' + a)
translated.append(b)
return translated
"""
Explanation: Approach 3: With Lazy Variable Wrapper (Python)
End of explanation
"""
commands = display('inputs/input7.test2.txt')
for k, v in commands.items():
command_str = '{0} = LazyVar(lambda: {1})'.format('var_' + k, ''.join(translate(v)))
print(command_str)
exec(command_str)
def test():
assert(var_d.func() == 72)
assert(var_e.func() == 507)
assert(var_f.func() == 492)
assert(var_g.func() == 114)
assert(var_h.func() == 65412)
assert(var_i.func() == 65079)
assert(var_x.func() == 123)
assert(var_y.func() == 456)
test()
"""
Explanation: Test
End of explanation
"""
def rscript_command(var, l):
vocab = {'AND' : 'bitwAnd',
'OR' : 'bitwOr',
'LSHIFT' : 'bitwShiftL',
'RSHIFT' : 'bitwShiftR'}
if len(l) == 3:
func = vocab[l[1]]
arg1 = l[0] if l[0].isdigit() else 'var_' + l[0] + '()'
arg2 = l[2] if l[2].isdigit() else 'var_' + l[2] + '()'
return 'var_{0} <- function(a={1}, b={2})'.format(var, arg1, arg2) + ' {' + '{0}(a,b)'.format(func) + '}'
elif len(l) == 2:
func = 'bitwNot'
arg1 = l[1] if l[1].isdigit() else 'var_' + l[1] + '()'
return 'var_{0} <- function(a={1})'.format(var, arg1) + ' {' + '{0}(a)'.format(func) + '}'
else:
arg1 = l[0] if l[0].isdigit() else 'var_' + l[0] + '()'
return 'var_{0} <- function(a={1})'.format(var, arg1) + ' {' + 'a' + '}'
def generate_rscript(commands, target):
with open('day7_commands.R', 'wt') as f:
for k, v in commands.items():
f.write(rscript_command(k, v)+'\n')
f.write('var_' + target + '()')
"""
Explanation: Although the approach passes the test, it does not finish in a reasonable time for the full input.
Approach 4: With Lazy Evaluation in R
The approach now is to exploit the lazy evaluation capabilities in R. So we leverage Python to create an R script that does the job.
End of explanation
"""
commands = display('inputs/input7.test2.txt')
generate_rscript(commands, 'd')
! cat day7_commands.R
!Rscript day7_commands.R
"""
Explanation: Test
End of explanation
"""
commands = display('inputs/input7.txt')
generate_rscript(commands, 'a')
! cat day7_commands.R
!Rscript day7_commands.R
"""
Explanation: Solution
End of explanation
"""
commands = display('inputs/input7.txt')
commands['b'] = ['16076']
evaluate('a')
"""
Explanation: Although this approach is more natural than defining a LazyVar wrapper in Python, it still takes quite a long time to execute, so it is not a very appealing solution after all.
Day 7.2
End of explanation
"""
|
patrick-kidger/diffrax
|
examples/stiff_ode.ipynb
|
apache-2.0
|
import time
import diffrax
import equinox as eqx # https://github.com/patrick-kidger/equinox
import jax
import jax.numpy as jnp
"""
Explanation: Stiff ODE
This example demonstrates the use of implicit integrators to handle stiff dynamical systems. In this case we consider the Robertson problem.
This example is available as a Jupyter notebook here.
End of explanation
"""
jax.config.update("jax_enable_x64", True)
class Robertson(eqx.Module):
k1: float
k2: float
k3: float
def __call__(self, t, y, args):
f0 = -self.k1 * y[0] + self.k3 * y[1] * y[2]
f1 = self.k1 * y[0] - self.k2 * y[1] ** 2 - self.k3 * y[1] * y[2]
f2 = self.k2 * y[1] ** 2
return jnp.stack([f0, f1, f2])
"""
Explanation: Using 64-bit precision is important when solving problems with tolerances of 1e-8 (or smaller).
End of explanation
"""
@jax.jit
def main(k1, k2, k3):
robertson = Robertson(k1, k2, k3)
terms = diffrax.ODETerm(robertson)
t0 = 0.0
t1 = 100.0
y0 = jnp.array([1.0, 0.0, 0.0])
dt0 = 0.0002
solver = diffrax.Kvaerno5()
saveat = diffrax.SaveAt(ts=jnp.array([0.0, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2]))
stepsize_controller = diffrax.PIDController(rtol=1e-8, atol=1e-8)
sol = diffrax.diffeqsolve(
terms,
solver,
t0,
t1,
dt0,
y0,
saveat=saveat,
stepsize_controller=stepsize_controller,
)
return sol
"""
Explanation: One should almost always use adaptive step sizes when using implicit integrators. This is so that the step size can be reduced if the nonlinear solve (inside the implicit solve) doesn't converge.
Note that the solver takes a nonlinear_solver argument, e.g. Kvaerno5(nonlinear_solver=NewtonNonlinearSolver()). If you want to optimise performance then you can try adjusting the error tolerances, kappa value, and maximum number of steps for the nonlinear solver.
End of explanation
"""
main(0.04, 3e7, 1e4)
start = time.time()
sol = main(0.04, 3e7, 1e4)
end = time.time()
print("Results:")
for ti, yi in zip(sol.ts, sol.ys):
print(f"t={ti.item()}, y={yi.tolist()}")
print(f"Took {sol.stats['num_steps']} steps in {end - start} seconds.")
"""
Explanation: Do one iteration to JIT compile everything. Then time the second iteration.
End of explanation
"""
|
miaecle/deepchem
|
examples/tutorials/04_Introduction_to_Graph_Convolutions.ipynb
|
mit
|
%tensorflow_version 1.x
!curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import deepchem_installer
%time deepchem_installer.install(version='2.3.0')
"""
Explanation: Tutorial Part 4: Introduction to Graph Convolutions
In the previous sections of the tutorial, we learned about Dataset and Model objects. We learned how to load some data into DeepChem from files on disk and also learned some basic facts about molecular data handling. We then dove into some basic deep learning architectures. However, until now, we stuck with vanilla deep learning architectures and didn't really consider how to handle deep architectures specifically engineered to work with life science data.
In this tutorial, we'll change that by going a little deeper and learn about "graph convolutions." These are one of the most powerful deep learning tools for working with molecular data. The reason for this is that molecules can be naturally viewed as graphs.
Note how standard chemical diagrams of the sort we're used to from high school lend themselves naturally to visualizing molecules as graphs. In the remainder of this tutorial, we'll dig into this relationship in significantly more detail. This will let us get an in-the-guts understanding of how these systems work.
Colab
This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
Setup
To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment.
End of explanation
"""
import deepchem as dc
from deepchem.models.graph_models import GraphConvModel
"""
Explanation: Ok, now that we have our environment installed, we can actually import the core GraphConvModel that we'll use throughout this tutorial.
End of explanation
"""
# Load Tox21 dataset
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21(featurizer='GraphConv', reload=False)
train_dataset, valid_dataset, test_dataset = tox21_datasets
"""
Explanation: Now, let's use the MoleculeNet suite to load the Tox21 dataset. We need to make sure to process the data in a way that graph convolutional networks can use. For that, we set the featurizer option to 'GraphConv'. The MoleculeNet call will return a training set, a validation set, and a test set for us to use. The call also returns transformers, a list of data transformations that were applied to preprocess the dataset. (Most deep networks are quite finicky and require a set of data transformations to ensure that training proceeds stably.)
End of explanation
"""
n_tasks = len(tox21_tasks)
model = GraphConvModel(n_tasks, batch_size=50, mode='classification')
num_epochs = 10
losses = []
for i in range(num_epochs):
loss = model.fit(train_dataset, nb_epoch=1)
print("Epoch %d loss: %f" % (i, loss))
losses.append(loss)
"""
Explanation: Let's now train a graph convolutional network on this dataset. DeepChem has the class GraphConvModel that wraps a standard graph convolutional architecture underneath the hood for user convenience. Let's instantiate an object of this class and train it on our dataset.
End of explanation
"""
import matplotlib.pyplot as plot
plot.ylabel("Loss")
plot.xlabel("Epoch")
x = range(num_epochs)
y = losses
plot.scatter(x, y)
plot.show()
"""
Explanation: Let's plot these losses so we can take a look at how the loss changes over the process of training.
End of explanation
"""
import numpy as np
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
print("Training ROC-AUC Score: %f" % train_scores["mean-roc_auc_score"])
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Validation ROC-AUC Score: %f" % valid_scores["mean-roc_auc_score"])
"""
Explanation: We see that the losses fall nicely and give us stable learning.
Let's try to evaluate the performance of the model we've trained. For this, we need to define a metric, a measure of model performance. dc.metrics holds a collection of metrics already. For this dataset, it is standard to use the ROC-AUC score, the area under the receiver operating characteristic curve (which measures the tradeoff between the true positive rate and the false positive rate). Luckily, the ROC-AUC score is already available in DeepChem.
To measure the performance of the model under this metric, we can use the convenience function model.evaluate().
End of explanation
"""
import tensorflow as tf
import tensorflow.keras.layers as layers
atom_features = layers.Input(shape=(75,))
degree_slice = layers.Input(shape=(2,), dtype=tf.int32)
membership = layers.Input(shape=tuple(), dtype=tf.int32)
deg_adjs = []
for i in range(0, 10 + 1):
deg_adj = layers.Input(shape=(i+1,), dtype=tf.int32)
deg_adjs.append(deg_adj)
"""
Explanation: What's going on under the hood? Could we build GraphConvModel ourselves? Of course! The first step is to define the inputs to our model. Conceptually, graph convolutions just require the structure of the molecule in question and a vector of features for every atom that describes the local chemical environment. However, in practice, due to TensorFlow's limitations as a general programming environment, we also have to preprocess some auxiliary information.
atom_features holds a feature vector of length 75 for each atom. The other inputs are required to support minibatching in TensorFlow. degree_slice is an indexing convenience that makes it easy to locate atoms from all molecules with a given degree. membership determines the membership of atoms in molecules (atom i belongs to molecule membership[i]). deg_adjs is a list that contains adjacency lists grouped by atom degree. For more details, check out the code.
To define feature inputs with Keras, we use the Input layer. Conceptually, a model is a mathematical graph composed of layer objects. Input layers have to be the root nodes of the graph since they constitute inputs.
End of explanation
"""
from deepchem.models.layers import GraphConv, GraphPool, GraphGather
batch_size = 50
gc1 = GraphConv(64, activation_fn=tf.nn.relu)([atom_features, degree_slice, membership] + deg_adjs)
batch_norm1 = layers.BatchNormalization()(gc1)
gp1 = GraphPool()([batch_norm1, degree_slice, membership] + deg_adjs)
gc2 = GraphConv(64, activation_fn=tf.nn.relu)([gp1, degree_slice, membership] + deg_adjs)
batch_norm2 = layers.BatchNormalization()(gc2)
gp2 = GraphPool()([batch_norm2, degree_slice, membership] + deg_adjs)
dense = layers.Dense(128, activation=tf.nn.relu)(gp2)
batch_norm3 = layers.BatchNormalization()(dense)
readout = GraphGather(batch_size=batch_size, activation_fn=tf.nn.tanh)([batch_norm3, degree_slice, membership] + deg_adjs)
logits = layers.Reshape((n_tasks, 2))(layers.Dense(n_tasks*2)(readout))
softmax = layers.Softmax()(logits)
"""
Explanation: Let's now implement the body of the graph convolutional network. DeepChem has a number of layers that encode various graph operations. Namely, the GraphConv, GraphPool and GraphGather layers. We will also apply standard neural network layers such as Dense and BatchNormalization.
The layers we're adding effect a "feature transformation" that will create one vector for each molecule.
End of explanation
"""
inputs = [atom_features, degree_slice, membership] + deg_adjs
outputs = [softmax]
keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
loss = dc.models.losses.CategoricalCrossEntropy()
model = dc.models.KerasModel(keras_model, loss=loss)
"""
Explanation: Let's now create the KerasModel. To do that we specify the inputs and outputs to the model. We also have to define a loss for the model which tells the network the objective to minimize during training.
End of explanation
"""
from deepchem.metrics import to_one_hot
from deepchem.feat.mol_graphs import ConvMol
def data_generator(dataset, epochs=1, predict=False, pad_batches=True):
for epoch in range(epochs):
for ind, (X_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, pad_batches=pad_batches, deterministic=True)):
multiConvMol = ConvMol.agglomerate_mols(X_b)
inputs = [multiConvMol.get_atom_features(), multiConvMol.deg_slice, np.array(multiConvMol.membership)]
for i in range(1, len(multiConvMol.get_deg_adjacency_lists())):
inputs.append(multiConvMol.get_deg_adjacency_lists()[i])
labels = [to_one_hot(y_b.flatten(), 2).reshape(-1, n_tasks, 2)]
weights = [w_b]
yield (inputs, labels, weights)
"""
Explanation: Now that we've successfully defined our graph convolutional model, we need to train it. We can call fit(), but we need to make sure that each minibatch of data populates all the Input objects that we've created. For this, we need to create a Python generator that given a batch of data generates the lists of inputs, labels, and weights whose values are Numpy arrays we'd like to use for this step of training.
End of explanation
"""
num_epochs = 10
losses = []
for i in range(num_epochs):
loss = model.fit_generator(data_generator(train_dataset, epochs=1))
print("Epoch %d loss: %f" % (i, loss))
losses.append(loss)
"""
Explanation: Now, we can train the model using KerasModel.fit_generator(generator) which will use the generator we've defined to train the model.
End of explanation
"""
plot.title("Keras Version")
plot.ylabel("Loss")
plot.xlabel("Epoch")
x = range(num_epochs)
y = losses
plot.scatter(x, y)
plot.show()
"""
Explanation: Let's now plot these losses and take a quick look.
End of explanation
"""
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def reshape_y_pred(y_true, y_pred):
"""
GraphConv always pads batches, so we need to remove the predictions
for the padding samples. Also, it outputs two values for each task
(probabilities of positive and negative), but we only want the positive
probability.
"""
n_samples = len(y_true)
return y_pred[:n_samples, :, 1]
print("Evaluating model")
train_predictions = model.predict_on_generator(data_generator(train_dataset, predict=True))
train_predictions = reshape_y_pred(train_dataset.y, train_predictions)
train_scores = metric.compute_metric(train_dataset.y, train_predictions, train_dataset.w)
print("Training ROC-AUC Score: %f" % train_scores)
valid_predictions = model.predict_on_generator(data_generator(valid_dataset, predict=True))
valid_predictions = reshape_y_pred(valid_dataset.y, valid_predictions)
valid_scores = metric.compute_metric(valid_dataset.y, valid_predictions, valid_dataset.w)
print("Valid ROC-AUC Score: %f" % valid_scores)
"""
Explanation: Now that we have trained our graph convolutional method, let's evaluate its performance. We again have to use our defined generator to evaluate model performance.
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.2/tutorials/pblum.ipynb
|
gpl-3.0
|
!pip install -I "phoebe>=2.2,<2.3"
"""
Explanation: Passband Luminosity
Setup
Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
End of explanation
"""
b.add_dataset('lc', times=phoebe.linspace(0,1,101), dataset='lc01')
"""
Explanation: And we'll add a single light curve dataset so that we can see how passband luminosities affect the resulting synthetic light curve model.
End of explanation
"""
b.set_value('irrad_method', 'none')
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'linear')
b.set_value_all('ld_coeffs', [0.])
b.set_value_all('ld_mode_bol', 'manual')
b.set_value_all('ld_func_bol', 'linear')
b.set_value_all('ld_coeffs_bol', [0.])
b.set_value_all('atm', 'blackbody')
"""
Explanation: Lastly, just to make things a bit easier and faster, we'll turn off irradiation (reflection), use blackbody atmospheres, and disable limb-darkening (so that we can play with weird temperatures without having to worry about falling off the grids).
End of explanation
"""
print(b.get_parameter(qualifier='pblum_mode', dataset='lc01'))
"""
Explanation: Relevant Parameters & Methods
NEW in PHOEBE 2.2: A pblum_mode parameter exists for each LC dataset in the bundle. This parameter defines how passband luminosities are handled. The subsections below describe the use and parameters exposed depending on the value of this parameter.
End of explanation
"""
print(b.compute_pblums())
"""
Explanation: For any of these modes, you can expose the intrinsic (excluding extrinsic effects such as spots and irradiation) and extrinsic computed luminosities of each star (in each dataset) by calling b.compute_pblums.
Note that as it's an aspect-dependent effect, boosting is ignored in all of these output values.
End of explanation
"""
print(b.filter(qualifier='pblum'))
print(b.get_parameter(qualifier='pblum_component'))
b.set_value('pblum_component', 'secondary')
print(b.filter(qualifier='pblum'))
"""
Explanation: For more details, see the section below on "Accessing Model Luminosities" as well as the b.compute_pblums API docs
pblum_mode = 'component-coupled'
pblum_mode='component-coupled' is the default option and maintains the default behavior from previous releases. Here the user provides passband luminosities for a single star in the system for the given dataset/passband, and all other stars are scaled accordingly.
By default, the value of pblum is set for the primary star in the system, but we can instead provide pblum for the secondary star by changing the value of pblum_component.
End of explanation
"""
b.set_value('pblum_component', 'primary')
print(b.get_parameter(qualifier='pblum', component='primary'))
"""
Explanation: Note that in general (for the case of a spherical star), a pblum of 4pi will result in an out-of-eclipse flux of ~1.
Now let's just reset to the default case where the primary star has a provided (default) pblum of 4pi.
End of explanation
"""
print(b.compute_pblums())
"""
Explanation: NOTE: other parameters also affect flux-levels, including limb darkening, third light, boosting, irradiation, and distance
If we call b.compute_pblums, we'll see that the computed intrinsic luminosity of the primary star (pblum@primary@lc01) matches the value of the parameter above.
End of explanation
"""
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: Let's see how changing the value of pblum affects the computed light curve. By default, pblum is set to be 4 pi, giving a total flux for the primary star of ~1.
Since the secondary star in the default binary is identical to the primary star, we'd expect an out-of-eclipse flux of the binary to be ~2.
End of explanation
"""
b.set_value('pblum', component='primary', value=2*np.pi)
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: If we now set pblum to be only 2 pi, we should expect the luminosities as well as entire light curve to be scaled in half.
End of explanation
"""
b.set_value('teff', component='secondary', value=0.5 * b.get_value('teff', component='primary'))
print(b.filter(qualifier='teff'))
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: And if we halve the temperature of the secondary star - the resulting light curve changes to the new sum of fluxes, where the primary star dominates since the secondary star flux is reduced by a factor of 16, so we expect a total out-of-eclipse flux of ~0.5 + ~0.5/16 = ~0.53.
End of explanation
"""
b.set_value_all('teff', 6000)
b.set_value_all('pblum', 4*np.pi)
"""
Explanation: Let us undo our changes before we look at decoupled luminosities.
End of explanation
"""
b.set_value('pblum_mode', 'decoupled')
"""
Explanation: pblum_mode = 'decoupled'
The luminosities are decoupled when pblums are provided for the individual components. To accomplish this, set pblum_mode to 'decoupled'.
End of explanation
"""
print(b.filter(qualifier='pblum'))
"""
Explanation: Now we see that both pblum parameters are available and can have different values.
End of explanation
"""
b.set_value_all('pblum', 4*np.pi)
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: If we set these to 4pi, then we'd expect each star to contribute 1.0 in flux units, meaning the baseline of the light curve should be at approximately 2.0
End of explanation
"""
print(b.filter(qualifier='teff'))
b.set_value('teff', component='secondary', value=3000)
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: Now let's make a significant temperature-ratio by making a very cool secondary star. Since the luminosities are decoupled - this temperature change won't affect the resulting light curve very much (compare this to the case above with coupled luminosities). What is happening here is that even though the secondary star is cooler, its luminosity is being rescaled to the same value as the primary star, so the eclipse depth doesn't change (you would see a similar lack-of-effect if you changed the radii - although in that case the eclipse widths would still change due to the change in geometry).
End of explanation
"""
b.set_value_all('teff', 6000)
b.set_value_all('pblum', 4*np.pi)
"""
Explanation: In most cases you will not want decoupled luminosities as they can easily break the self-consistency of your model.
Now we'll just undo our changes before we look at accessing model luminosities.
End of explanation
"""
b.set_value('pblum_mode', 'absolute')
"""
Explanation: pblum_mode = 'absolute'
By setting pblum_mode to 'absolute', luminosities and fluxes will be returned in absolute units and not rescaled. Note that third light and distance will still affect the resulting flux levels.
End of explanation
"""
print(b.filter(qualifier='pblum'))
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: As we no longer provide pblum values to scale, those parameters are not visible when filtering.
End of explanation
"""
fluxes = b.get_value('fluxes', context='model') * 0.8 + (np.random.random(101) * 0.1)
b.set_value('fluxes', context='dataset', value=fluxes)
afig, mplfig = b.plot(context='dataset', show=True)
"""
Explanation: (note the exponent on the y-axis of the above figure)
pblum_mode = 'dataset-scaled'
Setting pblum_mode to 'dataset-scaled' is only allowed if fluxes are attached to the dataset itself. Let's use our existing model to generate "fake" data and then populate the dataset.
End of explanation
"""
b.set_value('pblum_mode', 'dataset-scaled')
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True)
"""
Explanation: Now if we set pblum_mode to 'dataset-scaled', the resulting model will be scaled to best fit the data. Note that in this mode we cannot access computed luminosities via b.compute_pblums (which would raise an error if we attempted to do so), nor can we access scaled intensities from the mesh.
End of explanation
"""
b.set_value('pblum_mode', 'component-coupled')
b.set_value('fluxes', context='dataset', value=[])
"""
Explanation: Before moving on, let's remove our fake data (and reset pblum_mode or else PHOEBE will complain about the lack of data).
End of explanation
"""
b.add_dataset('lc', times=phoebe.linspace(0,1,101),
ld_mode='manual', ld_func='linear', ld_coeffs=[0],
passband='Johnson:B', dataset='lc02')
b.set_value('pblum_mode', dataset='lc02', value='dataset-coupled')
"""
Explanation: pblum_mode = 'dataset-coupled'
Setting pblum_mode to 'dataset-coupled' allows for the same scaling factor to be applied to two different datasets. In order to see this in action, we'll add another LC dataset in a different passband.
End of explanation
"""
print(b.filter('pblum*'))
print(b.compute_pblums())
b.run_compute()
afig, mplfig = b.plot(show=True, legend=True)
"""
Explanation: Here we see that pblum_mode@lc01 is set to 'component-coupled', meaning it will follow the rules described earlier where pblum is provided for the primary component and the secondary is coupled to that. pblum_mode@lc02 is set to 'dataset-coupled' with pblum_dataset@lc02 pointing to 'lc01'.
End of explanation
"""
print(b.compute_pblums())
"""
Explanation: Accessing Model Luminosities
Passband luminosities at t0@system per-star (including following all coupling logic) can be computed and exposed on the fly by calling compute_pblums.
End of explanation
"""
print(b.compute_pblums(dataset='lc01', component='primary'))
"""
Explanation: By default this exposes 'pblum' and 'pblum_ext' for all component-dataset pairs in the form of a dictionary. Alternatively, you can pass a label or list of labels to component and/or dataset.
End of explanation
"""
b.add_dataset('mesh', times=np.linspace(0,1,5), dataset='mesh01', columns=['areas', 'pblum_ext@lc01', 'ldint@lc01', 'ptfarea@lc01', 'abs_normal_intensities@lc01', 'normal_intensities@lc01'])
b.run_compute()
"""
Explanation: For more options, see the b.compute_pblums API docs.
Note that this same logic is applied (at t0) to initialize all passband luminosities within the backend, so there is no need to call compute_pblums before run_compute.
In order to access passband luminosities at times other than t0, you can add a mesh dataset and request the pblum_ext column to be exposed. For stars that have pblum defined (as opposed to coupled to another star or dataset), this value should be equivalent to the value of the parameter (at t0 if no features or irradiation are present, and in simple circular cases will probably be equivalent at all times).
Let's create a mesh dataset at a few times and then access the synthetic luminosities.
End of explanation
"""
print(b.filter(qualifier='pblum_ext', context='model').twigs)
"""
Explanation: Since the luminosities are passband-dependent, they are stored with the same dataset as the light curve (or RV), but with the mesh method, and are available at each of the times at which a mesh was stored.
End of explanation
"""
t0 = b.get_value('t0@system')
print(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))
print(b.get_value('pblum@primary@dataset'))
print(b.compute_pblums(component='primary', dataset='lc01'))
"""
Explanation: Now let's compare the value of the synthetic luminosities to those of the input pblum
End of explanation
"""
print(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))
print(b.get_value(qualifier='pblum_ext', time=t0, component='secondary', kind='mesh', context='model'))
"""
Explanation: In this case, since our two stars are identical, the synthetic luminosity of the secondary star should be the same as the primary (and the same as pblum@primary).
End of explanation
"""
b['teff@secondary@component'] = 3000
print(b.compute_pblums(dataset='lc01'))
b.run_compute()
print(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))
print(b.get_value(qualifier='pblum_ext', time=t0, component='secondary', kind='mesh', context='model'))
"""
Explanation: However, if we change the temperature of the secondary star again, since the pblums are coupled, we'd expect the synthetic luminosity of the primary to remain fixed but the secondary to decrease.
End of explanation
"""
print(b['ld_mode'])
print(b['atm'])
b.run_compute(irrad_method='horvat')
print(b.get_value(qualifier='pblum_ext', time=t0, component='primary', kind='mesh', context='model'))
print(b.get_value('pblum@primary@dataset'))
print(b.compute_pblums(dataset='lc01', irrad_method='horvat'))
"""
Explanation: And lastly, if we re-enable irradiation, we'll see that the extrinsic luminosities do not match the prescribed value of pblum (an intrinsic luminosity).
End of explanation
"""
b.set_value_all('teff@component', 6000)
"""
Explanation: Now, we'll just undo our changes before continuing
End of explanation
"""
b.run_compute()
areas = b.get_value(qualifier='areas', dataset='mesh01', time=t0, component='primary', unit='m^2')
ldint = b.get_value(qualifier='ldint', component='primary', time=t0)
ptfarea = b.get_value(qualifier='ptfarea', component='primary', time=t0)
abs_normal_intensities = b.get_value(qualifier='abs_normal_intensities', dataset='lc01', time=t0, component='primary')
normal_intensities = b.get_value(qualifier='normal_intensities', dataset='lc01', time=t0, component='primary')
"""
Explanation: Role of Pblum
Let's now look at the intensities in the mesh to see how they're being scaled under the hood. First we'll recompute our model with equal temperatures and irradiation disabled (to ignore the difference between pblum and pblum_ext).
End of explanation
"""
print(np.median(abs_normal_intensities))
"""
Explanation: 'abs_normal_intensities' are the intensities per triangle in absolute units, i.e. W/m^3.
End of explanation
"""
print(np.median(normal_intensities))
"""
Explanation: The values of 'normal_intensities', however, are significantly samller (in this case). These are the intensities in relative units which will eventually be integrated to give us flux for a light curve.
End of explanation
"""
pblum = b.get_value(qualifier='pblum', component='primary', context='dataset')
print(np.sum(normal_intensities * ldint * np.pi * areas) * ptfarea, pblum)
"""
Explanation: 'normal_intensities' are scaled from 'abs_normal_intensities' so that the computed luminosity matches the prescribed luminosity (pblum).
Here we compute the luminosity by summing over each triangle's intensity in the normal direction, and multiply it by pi to account for blackbody intensity emitted in all directions in the solid angle, and by the area of that triangle.
End of explanation
"""
|
AtmaMani/pyChakras
|
python_crash_course/seaborn_cheat_sheet_1.ipynb
|
mit
|
import seaborn as sns
%matplotlib inline
"""
Explanation: Seaborn crash course
<img src='https://seaborn.pydata.org/_images/hexbin_marginals.png' height="150" width="150">
Seaborn is an amazing data and statistical visualization library that is built using matplotlib. It has good defaults and is very easy to use.
ToC
- load sample dataset
- Distribution plots
- Plotting dist of 2 variables
- annotating with correlation coefficient if unavailable
- Plotting dist of all variables
- Plotting data frequency
End of explanation
"""
tips = sns.load_dataset('tips')
tips.head(5)
"""
Explanation: Load sample dataset
Seaborn comes with a number of example datasets. Let us load the restaurant tipping dataset.
End of explanation
"""
#find dist of total bills
sns.distplot(tips['total_bill'])
"""
Explanation: Distribution plots
One of the first things we do is to look at the distribution of the data.
End of explanation
"""
tips.total_bill.mean()
tips_mean = tips.total_bill.mean()
tips_sd = tips.total_bill.std()
ax = sns.distplot(tips['total_bill'])
# plot mean in black
ax.axvline(x=tips_mean, color='black', linestyle='dashed')
# plot mean +- 1SD in red, dotted
ax.axvline(x=tips_mean + tips_sd, color='red', linestyle='dotted')
ax.axvline(x=tips_mean - tips_sd, color='red', linestyle='dotted')
# title
ax.set_title(r'$\mu = {}$ | $\sigma = {}$'.format(round(tips_mean, 2), round(tips_sd, 2)))
"""
Explanation: It is often useful to overlay the mean and SD on the histogram; below is one way to do it.
End of explanation
"""
sns.distplot(tips['total_bill'], kde=False, bins=35)
"""
Explanation: You can change arguments like bins and the kde flag to customize the plot
End of explanation
"""
sns.jointplot(x=tips['total_bill'], y=tips['tip'])
"""
Explanation: Plotting dist of 2 variables
Seaborn can very easily attach a histogram to a scatter plot to show the data distribution
End of explanation
"""
# the original cell referenced an external dataframe (hurricanes_ipl) that is not defined
# in this notebook; the same annotation works on the tips dataset loaded above
from scipy import stats
jgrid = sns.jointplot(x='total_bill', y='tip', data=tips,
                      kind='reg', joint_kws={'line_kws':{'color':'green'}}, height=7, space=0.5)
j = jgrid.annotate(stats.pearsonr)
j = jgrid.ax_joint.set_title('Is the tip correlated with the total bill?')
sns.jointplot(x=tips['total_bill'], y=tips['tip'], kind='hex')
sns.jointplot(x=tips['total_bill'], y=tips['tip'], kind='reg') #regression
"""
Explanation: You can use the kind argument to change the scatter to hex, reg etc
Annotating correlation coefficient and p value if unavailable
<blockquote>
<b>Note:</b> In recent versions, seaborn does not print the correlation coefficient and its p-value. To get this, use annotation as shown below:
</blockquote>
End of explanation
"""
sns.pairplot(tips, hue='sex')
"""
Explanation: Plotting dist of all variables
You can get a quick overview of the pairwise relationships between your columns using pairplot. Specifying a categorical variable via the hue argument will shade the plot accordingly.
End of explanation
"""
sns.rugplot(tips['total_bill'])
"""
Explanation: Plotting data frequency
Histograms show data frequency. The distplot gives histograms. Another way to visualize this is using a rugplot. Rug plots are similar to the trading-frequency bars we see in stock ticker time series datasets.
End of explanation
"""
|
pedrosiracusa/pedrosiracusa.github.io
|
_notebooks/construindo-redes-sociais-com-dados-de-colecoes-biologicas.ipynb
|
mit
|
# this piece of code is only needed to update the Python PATH
import sys,os
sys.path.insert(0,os.path.expanduser('~/Documents/caryocar'))
from caryocar.models import CWN, SCN
"""
Explanation: Building social networks from biological collection data
In a previous article I briefly characterized the social networks behind the UnB Herbarium, showing a new application perspective for biological collection data. This approach consists of deriving social collaboration interactions between collectors and characterizing their taxonomic interests from species occurrence records, and it incorporates concepts and tools from the field of social network analytics.
I had the opportunity to develop these ideas during my master's research, which resulted in the synthesis of two network-based models: the Species-Collector Networks (SCN) and the Collector Collaboration Networks (CWN). If you have not yet heard of these models, I recommend reading my outreach article before continuing.
In this article I will demonstrate the process of building these models in 3 steps, starting from a species occurrence dataset and using the Caryocar library (written in Python). Here I will again use the dataset from the UnB Herbarium (acronym UB), which can be downloaded from the GBIF platform.
Let's start by importing the classes that implement the SCN and CWN models:
End of explanation
"""
from caryocar.cleaning import NamesAtomizer, namesFromString
from caryocar.cleaning import normalize, read_NamesMap_fromJson
from caryocar.cleaning import getNamesIndexes
"""
Explanation: The Caryocar package also provides some helper functions and classes for cleaning the data.
End of explanation
"""
import pandas as pd
"""
Explanation: Step 1. Reading the dataset
The first step is to read the species occurrence dataset.
To do this we will extend the functionality of the Python language with a library that is very useful for data analysis: Pandas.
With this library, we can load, transform and analyze our dataset within the programming environment.
End of explanation
"""
dsetPath = '/home/pedro/datasets/ub_herbarium/occurrence.csv'
cols = ['recordedBy','species']
occs_df = pd.read_csv(dsetPath, sep='\t', usecols=cols)
"""
Explanation: With Pandas' read_csv function, we will load the data from the CSV file and place it in a Data Frame structure, which is basically a table.
This function expects the name of the CSV file containing the data, as well as a list with the names of the columns we are interested in loading.
I will specify the path to the file in the dsetPath variable and the list of columns of interest in cols.
The dataframe will be stored in the occs_df variable.
To keep this article as simple as possible I will use only the essential fields:
* recordedBy: Stores the names of the collectors responsible for the record. If there is more than 1 collector, the names are separated by semicolons;
* species: Stores the scientific name, at species level, determined for the specimen in question.
End of explanation
"""
occs_df.head(10)
"""
Explanation: Let's take a look at what the dataframe looks like. To do so, let's request only the first 10 rows.
End of explanation
"""
occs_df.isnull().sum()
"""
Explanation: Step 2: Data cleaning
Before building the model, we need to clean the data to ensure that it is in the proper format for constructing the models.
The first step is to filter out the records with null elements (NaN) in each of the dataframe fields. A null element means missing information, and therefore will not help much in building our models.
Let's look at the number of nulls in each field:
End of explanation
"""
occs_df.dropna(how='any', inplace=True)
"""
Explanation: The collector information is missing in only 9 of the records. We will simply drop them. Another point is that, to simplify our modeling, I will only use records that have been identified to species level. This means we will have to discard 32711 records in which the species identity information is missing.
End of explanation
"""
occs_df.isnull().sum()
"""
Explanation: Now we no longer have nulls in any of the columns, and we can proceed:
End of explanation
"""
na = NamesAtomizer(atomizeOp=namesFromString)
"""
Explanation: Atomizing collector names
The collectors field (recordedBy) is fundamental for our modeling, but unfortunately it tends to be a bit problematic.
The first problem is that the collector names are not atomic. This means that multiple names can be encoded in a single value (in this case, the list of names is encoded as a single string, with each name separated by a semicolon).
According to the recommendations of the Biodiversity Information Standards (TDWG), collector names should in general be included using the following rule: surname with the first letter capitalized, followed by a comma and a space and the initials of the name in capital letters, separated by periods (e.g. Proença, C.E.B.).
In addition, TDWG recommends that the separator used to delimit collector names should be the pipe character ( | ).
However, the character used in the UB dataset is the semicolon.
This will not be a big problem in our case, since in this dataset the semicolon is used consistently in almost all records.
To proceed with the atomization of the names we will use a helper class called NamesAtomizer. We will create the atomizer object and assign it to the variable na. We will pass in the function namesFromString, which specifies the rules used to split the names.
End of explanation
"""
names_replaces_file = '/home/pedro/data/ub_collectors_replaces.json'
"""
Explanation: The names atomizer resolves the vast majority of cases. But there are a few records with errors in the delimitation of the names. In this case the correction must be made by replacing, in each record, the value with its correct form.
For the UB dataset, these replacements are specified in the file stored in the names_replaces_file variable, below:
End of explanation
"""
! cat {names_replaces_file}
"""
Explanation: Just out of curiosity, let's have a look at the contents of this file:
End of explanation
"""
na.read_replaces(names_replaces_file)
"""
Explanation: Proceeding with the replacement:
End of explanation
"""
occs_df['recordedBy_atomized'] = na.atomize(occs_df['recordedBy'])
"""
Explanation: Now, with the help of the names atomizer, let's add a new column to the dataframe containing the atomized collector names. It will be called recordedBy_atomized:
End of explanation
"""
namesMap_file = '/home/pedro/data/ub_namesmap.json'
"""
Explanation: Name normalization and mapping
A second problem is that collector names may have been written in several different ways, whether due to errors or to omission of parts of the name.
For example, the name 'Proença, C.E.B.' can have several variants, including 'Proenca, C.E.B,', 'Proença, C.E.', 'Proença, C.'.
We need a way to link all these variants to a single main name.
The solution to this problem, so far, is to store a map linking each variant to a normal form of the name. The normalization process includes transforming the name into a simplified form. This means we will only use lowercase characters, omit accents and punctuation, and remove non-alphanumeric characters.
In the example above, all the names would be mapped to 'proenca,ceb'.
For the UB dataset, I already have a ready-made names map, stored in the following file:
End of explanation
"""
! head {namesMap_file} -n 20
"""
Explanation: This file is large, but let's look at the first 20 lines to get an idea of its contents:
End of explanation
"""
nm = read_NamesMap_fromJson(namesMap_file, normalizationFunc=normalize)
"""
Explanation: Note that some collector names that were not null but still denote missing information (for example '.', '?') are mapped to an empty string. We will filter these names out later.
Let's now read the names map from the file and store it in the variable nm.
End of explanation
"""
collectors_names = list(set( n for n,st,num in na.getCachedNames() ))
nm.addNames(collectors_names)
"""
Explanation: In case there are collector names that are not in the file, let's make sure they get inserted:
End of explanation
"""
nm.getMap()['Proença, CEB']
nm.getMap()['Proença, C']
"""
Explanation: This map thus allows us to look up, for each variant of a name, its normal form:
End of explanation
"""
ni = getNamesIndexes(occs_df,'recordedBy_atomized', namesMap=nm.getMap())
"""
Explanation: The figure below illustrates the steps involved in preprocessing the collectors field, as described.
{:width="700px"}
The names index
Finally, let's build a names index, just to keep track of which rows of the dataframe each collector appears in. For this we will use the getNamesIndexes function. We need to provide the name of the dataframe, the name of the column that stores the atomized names, and the names map. I emphasize, however, that this step is not necessary for building the models (although it is useful for some analyses).
End of explanation
"""
scn = SCN(species=occs_df['species'], collectors=occs_df['recordedBy_atomized'], namesMap=nm)
"""
Explanation: Step 3: Building the models
We have reached the step that really matters. We already have a dataframe with minimally cleaned and structured data, so we can now build the models!
Species-Collector Network (SCN)
Species-collector networks model relations of interest, necessarily involving a collector and a species. The semantics of these relations can be described as collector -[records]-> species or, conversely, species -[is recorded by]-> collector. The figure below exemplifies this structure (a).
Since the model involves two classes of entities (collectors and species), there are two additional perspectives that can be explored: we can investigate how strongly two collectors are associated with each other in terms of their shared interests (b), as well as how strongly two species are associated with each other in terms of the set of collectors that record them (c).
We refer to perspectives (b) and (c) as projections of network (a). These projections are obtained simply by linking entities of the same class based on the number of entities of the opposite class that they share in structure (a).
{:width="500px"}
Now let's get to the code. We will build the species-collector network using the SCN class, available in the Caryocar package. For its construction, we must provide:
* A list of species, in this case the dataframe column occs_df['species'];
* A list containing lists of collectors, in this case the dataframe column occs_df['recordedBy_atomized'];
* A names map.
End of explanation
"""
cols_to_filter = ['','ignorado','ilegivel','incognito','etal']
scn.remove_nodes_from(cols_to_filter)
"""
Explanation: After building the model, let's remove invalid collector names, such as 'etal', 'ilegivel', 'incognito'.
End of explanation
"""
n_cols = len(scn.listCollectorsNodes())
cols_degrees = scn.degree(scn.listCollectorsNodes())
n_spp = len(scn.listSpeciesNodes())
spp_degrees = scn.degree(scn.listSpeciesNodes())
print(
f"""Species-Collector Network (SCN)
===============================
Total number of collectors: {n_cols}
Total number of species: {n_spp}
On average, a collector records {round( sum( k for n,k in cols_degrees)/n_cols)} distinct species
On average, a species is recorded by {round( sum( k for n,k in spp_degrees)/n_spp)} distinct collectors
Total number of edges: {len(scn.edges)}\n""")
print("Top-10 most productive collectors:")
for n,k in sorted(cols_degrees,key=lambda x:x[1],reverse=True)[:10]:
    print(f"  {n} ({k} distinct species)")
print("\nTop-10 most collected species:")
for n,k in sorted(spp_degrees,key=lambda x:x[1],reverse=True)[:10]:
    print(f"  {n} ({k} distinct collectors)")
"""
Explanation: Vejamos então um pequeno resumo sobre esta rede. Este pedaço de código pode ser um pouco feio, mas o que importa mesmo aqui são as informações imprimidas abaixo dele.
End of explanation
"""
cwn = CWN(cliques=occs_df['recordedBy_atomized'],namesMap=nm)
"""
Explanation: Um aspecto interessante a ser notado é a distribuição de grau (número de conexões de um vértice) nesta rede.
Embora em média um coletor registre 21 espécies diferentes, os coletores mais produtivos registraram mais de 1000!
De forma simlar, embora em média uma espécie seja registrada por 9 coletores distintos, as primeiras 10 foram registradas por mais de 200 coletores cada.
Embora esteja fora do escopo deste artigo, é fácil mostrar que a distribuição desta rede está longe de ser normal. Na verdade, é aproximada por uma lei de potência.
Isso significa que enquanto uma grande maioria de coletores registra pouquíssimas espécies diferentes, alguns poucos (chamados hubs, ou coletores-chave) registram um número muito acima da média.
De forma análoga enquanto uma grande maioria de espécies foi coletadas por apenas um ou poucos coletores diferentes, algumas poucas foram coletadas por um grande número de coletores distintos.
Rede de Colaboração de Coletores (CWN)
Redes de colaboração de coletores (CWNs), como o nome sugere, modelam relações de colaboração que se estabelecem entre coletores enquanto registram espécies em campo. Uma ligação entre pares de coletores é criada ou fortalecida cada vez que eles co-autoram um registro de espécie. Sendo assim, a semântica destas relações é descrita como coletor -[coleta espécime com]-> coletor. A figura abaixo ilustra a estrutura destas redes. É importante notar que, diferente das SCNs, nas CWNs a identidade taxonômica de cada registro não é representada em sua estrutura. Coletores que nunca colaboraram aparecem como vértices isolados na rede.
{:width="300px"}
O pacote Caryocar também fornece a classe SCN, que facilita a construção de redes de colaboração de coletores. Para sua construção, devemos fornecer:
Uma lista contendo listas de coletores (cliques), neste caso a coluna do dataframe occs_df['recordedBy_atomized'];
Um mapa de nomes.
End of explanation
"""
cols_to_filter = ['','ignorado','ilegivel','incognito','etal']
cwn.remove_nodes_from(cols_to_filter)
"""
Explanation: Just as we did with the SCN, let's remove invalid collector names
End of explanation
"""
n_cols = len(cwn.nodes)
cols_degrees = cwn.degree()
print(
f"""Collector Collaboration Network (CWN)
======================================
Total number of collectors: {n_cols}
Total number of edges: {len(cwn.edges)}
On average, a collector collaborates with {round( sum(k for n,k in cols_degrees)/n_cols )} peers over the course of their career
In total, {len([ n for n,k in cols_degrees if k==0 ])} collectors have never collaborated
In total, {len([ n for n,k in cols_degrees if k>3 ])} collectors have collaborated with more than 3 colleagues\n""")
print("Top-10 most collaborative collectors:")
for n,k in sorted(cols_degrees,key=lambda x:x[1],reverse=True)[:10]:
    print(f"  {n} ({k} colleagues)")
print("\nTop-10 collectors with no collaborations and the most records:")
for n,k in sorted([ (n,d['count']) for n,d in cwn.nodes(data=True) if cwn.degree(n)==0 ],key=lambda x: x[1], reverse=True)[:10]:
    print(f"  {n} ({cwn.nodes[n]['count']} records, 0 collaborations)")
"""
Explanation: Let's look at a summary of this network:
End of explanation
"""
|
leliel12/scikit-criteria
|
doc/source/tutorial/simus.ipynb
|
bsd-3-clause
|
# first lets import the DATA class
from skcriteria import Data
data = Data(
# the alternative matrix
mtx=[[250, 120, 20, 800],
[130, 200, 40, 1000],
[350, 340, 15, 600]],
# optimal sense
criteria=[max, max, min, max],
# names of alternatives and criteria
anames=["Prj 1", "Prj 2", "Prj 3"],
cnames=["jobs", "green", "fin", "env"])
# show the data object
data
"""
Explanation: The SIMUS tutorial
SIMUS (Sequential Interactive Model for Urban Systems)
Is a tool to aid decision-making problems with
multiple objectives. The method solves successive scenarios formulated as
linear programs. For each scenario, the decision-maker must choose the
criterion to be considered objective while the remaining restrictions
constitute the constrains system that the projects are subject to. In each
case, if there is a feasible solution that is optimum, it is recorded in a
matrix of efficient results. Then, from this matrix two rankings allow the
decision maker to compare results obtained by different procedures.
The first ranking is obtained through a linear weighting of each column by
a factor - equivalent to establishing a weight - that measures the
participation of the corresponding project. In the second ranking, the
method uses dominance and subordinate relationships between projects,
concepts from the French school of MCDM.
The Case: Land rehabilitation
An important port city has been affected by the change in
the modality of maritime transport, since the start of
container transport in the mid-20th century. The city was left with 39 hectares
of empty docks, warehouses and a railway terminal.
Three projects were developed to decide what to do with these places:
Project 1: Corporate towers - Hotels - Navy Base - Small park
Project 2: Residential towers - Commercial center in the old Railway terminal.
Project 3: Convention center - Big park and recreational area.
The criteria for the analysis of proposals are:
New job positions (jobs).
Green spaces (green)
Financial feasibility (fin)
Environmental impact (env)
Only for the 2nd criterion is a maximum limit of $500$ provided.
The decision-maker has the four criteria as goals, so 4 linear optimizations must be solved.
The data are provided in the next table:
|Criteria|Project 1|Project 2|Project 3|Right side value|Optimal Sense|
|---|---|---|---|---|---|
|jobs|250|130|350|-|Maximize|
|green|120|200|340|500|Maximize|
|fin|20|40|15|-|Minimize|
|env|800|1000|600|-|Maximize|
Data input
We can create a skcriteria.Data object with all this information (except the limits):
<div class="alert alert-info">
**Note:** SIMUS uses the alternatives as columns and the criteria as rows, but in *scikit-criteria* it is the opposite, so expect to see the previous table transposed.
</div>
End of explanation
"""
# import the class
from skcriteria.madm.simus import SIMUS
# create the new simus and
dm = SIMUS()
"""
Explanation: Create the model
End of explanation
"""
# store the decision inside the dec variable
dec = dm.decide(data, b=[None, 500, None, None])
# let's see the decision
dec
"""
Explanation: By default, calling SIMUS() creates a solver that internally uses the PuLP solver to solve the linear programs. Other available solvers are:
SIMUS(solver='glpk') for the GNU Linear Programming Kit
SIMUS(solver='gurobi') to use Gurobi Optimizer
SIMUS(solver='cplex') for IBM ILOG CPLEX Optimization Studio
<div class="alert alert-info">
**Note:** The full list of available optimizers is stored in `skcriteria.utils.lp.SOLVERS`.
</div>
Also, the njobs parameter determines how many cores the user wants to use to run the linear programs. For example
SIMUS(njobs=2) uses up to two cores. (By default all CPUs are used.)
Finally, the last (and most important) parameter is rank_by (default is 1): it determines which of the two ranking methods executed by SIMUS is the one that determines the final ranking. If the experiment is consistent, the two methods must determine the same ranking (please check the paper for more details).
Solve the problem
This is achieved by calling the method decide() of the decision maker object (dm).
End of explanation
"""
dec.e_
"""
Explanation: If you check the last column, the ranking is:
Project 3
Project 2
Project 1
Analysis
Most of the "intermediate" data of the SIMUS method are stored in the e_ field of the decision object dec.
End of explanation
"""
dec._e.stages
"""
Explanation: For example, the attribute stages stores all the linear programs executed by SIMUS:
End of explanation
"""
dec.e_.stage_results
"""
Explanation: The attribute stage_results stores the normalized matrix of efficient results
End of explanation
"""
import datetime as dt
import skcriteria
print("Scikit-Criteria version:", skcriteria.VERSION)
print("Running datetime:", dt.datetime.now())
"""
Explanation: References
Munier, N., Carignano, C., & Alberto, C. UN MÉTODO DE PROGRAMACIÓN MULTIOBJETIVO. Revista de la Escuela de Perfeccionamiento en Investigación Operativa, 24(39).
End of explanation
"""
|
aymeric-spiga/eduplanet
|
TOOLS/atlas-marsfrost.ipynb
|
gpl-2.0
|
filename = 'resultat.nc'
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
import cartopy.crs as ccrs
from netCDF4 import Dataset
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
data = Dataset(filename)
longitude=data.variables['longitude'][:]
latitude=data.variables['latitude'][:]
altitude=data.variables['altitude'][:]
Time = data.variables['Time'][:]
Ls = data.variables['Ls'][:]
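# Build a monotonically increasing solar longitude (Ls_true): each time Ls wraps around
# between consecutive outputs, a new year is counted and 360 degrees are added.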
dafirst = Time[0]
daint = Time[1] - dafirst
dalast = dafirst + (len(Time)-1)*daint
year = 0.
add = np.linspace(dafirst,dalast,num=len(Time)) ; add[0] = 0.
for iii in range(1,len(Ls)):
if Ls[iii] - Ls[iii-1] < 0: year = year+1.
add[iii] = year*360.
Ls_true = add + Ls
"""
Explanation: Atlas of Mars polar cap stability
Reminder: to save a figure, place this line after the figure in question:
<code> fig.savefig('figure.png') </code>
Then generate the figure with the corresponding cell, and run this line. The figure will be saved in the folder of the corresponding simulation. You can of course change the name or the extension (JPG, PDF).
End of explanation
"""
# User parameters -------------------------------------------------
earthtopo = False # add present-day coastlines
varname = 'tsurf'
vmin = 120
vmax = 280
# Code ------------------------------------------------------------
dataplt = data.variables[varname][:,:,:]
fig = plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_global()
if (earthtopo): ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(linestyle='--',color='black',
draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
clevs = np.linspace(vmin,vmax,29)
plt.contourf(longitude, latitude, np.mean(dataplt[:,:,:],axis=0),
clevs, transform=ccrs.PlateCarree(),cmap="jet")
plt.title(r"Température de surface moyenne", size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label(r'K',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
plt.show()
"""
Explanation: Time-averaged map over the entire experiment
End of explanation
"""
def psatw(temp):
# METHOD GOFF GRATCH (HygroLP) - OVER WATER
# -----------------------------------------
log10ew = -7.90298*(373.16/temp-1) \
+ 5.02808 * np.log10(373.16/temp) \
- 1.3816e-7 * (10**(11.344 * (1-temp/373.16))-1) \
+ 8.1328e-3 * (10**(-3.49149 *(373.16/temp-1))-1) \
+ np.log10(1013.246)
return 100 * (10**(log10ew))
def psati(temp):
# METHOD GOFF GRATCH (HygroLP) - OVER ICE
# ---------------------------------------
log10ei = -9.09718*(273.16/temp-1) \
- 3.56654*np.log10(273.16/temp) \
+ 0.876793*(1-temp/273.16) \
+ np.log10(6.1071)
return 100 * (10**(log10ei))
tzero = 273.15
temp = np.linspace(-80+tzero,tzero,81)
plt.yscale('log')
plt.plot(temp,psatw(temp))
plt.plot(temp,psati(temp))
plt.show()
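# Illustrative check (assumed values, not taken from the simulation): frost can
# form where the partial pressure exceeds the saturation pressure, i.e. p/psat > 1.
# For water ice at 200 K with the partial pressure assumed below (0.05% of 610 Pa):
# print(0.05e-2*610. / psati(200.)) # roughly 1.9, i.e. supersaturated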
# User parameters -------------------------------------------------
earthtopo = False # add present-day coastlines
ph2oatmo = 0.05e-2*610. # assumed mean water vapor partial pressure
vmin = 0.
vmax = 1.
# Code ------------------------------------------------------------
tsurfnc = data.variables['tsurf'][:,:,:]
dataplt = ph2oatmo/psati(tsurfnc)
fig = plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_global()
if (earthtopo): ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(linestyle='--',color='black',
draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
clevs = np.linspace(vmin,vmax,21)
plt.contourf(longitude, latitude, np.mean(dataplt[:,:,:],axis=0),
clevs, transform=ccrs.PlateCarree(),cmap="jet")
plt.title(r"Saturation ratio", size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label(r'NU',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
plt.show()
"""
Explanation: Time-averaged map of $p_{sat}$ for $H_2O$
End of explanation
"""
# User parameters -------------------------------------------------
earthtopo = False # add present-day coastlines
year_user = 1 # simulation year to inspect
Ls_user = 90. # chosen solar longitude
varname = 'tsurf'
vmin = 80.
vmax = 280.
# Code ------------------------------------------------------------
Ls_true_user = year_user*360. + Ls_user
Ls_ind = np.where(abs(Ls_true-Ls_true_user)==
abs(Ls_true-Ls_true_user).min())[0]
print("La valeur la plus proche trouvée est Ls = "
+ str(Ls_true[Ls_ind]-year_user*360.)
+ " pour l'année " + str(year_user))
# Code ------------------------------------------------------------
var = data.variables[varname][:,:,:]
dataplt = var
fig = plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_global()
if (earthtopo): ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(linestyle='--',color='black',
draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
clevs = np.linspace(vmin,vmax,29)
plt.contourf(longitude, latitude, np.squeeze(dataplt[Ls_ind,:,:]),
clevs, transform=ccrs.PlateCarree(),cmap="jet")
plt.title(r"Température de surface", size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label(r'K',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
plt.show()
# User parameters -------------------------------------------------
earthtopo = False # add present-day coastlines
year_user = 1 # simulation year to inspect
Ls_user = 270. # chosen solar longitude
ph2oatmo = 0.05e-2*610. # assumed mean water vapor partial pressure
vmin = 0.
vmax = 1.
# Code ------------------------------------------------------------
Ls_true_user = year_user*360. + Ls_user
Ls_ind = np.where(abs(Ls_true-Ls_true_user)==
abs(Ls_true-Ls_true_user).min())[0]
print("La valeur la plus proche trouvée est Ls = "
+ str(Ls_true[Ls_ind]-year_user*360.)
+ " pour l'année " + str(year_user))
# Code ------------------------------------------------------------
tsurfnc = data.variables['tsurf'][:,:,:]
dataplt = ph2oatmo/psati(tsurfnc)
fig = plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_global()
if (earthtopo): ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(linestyle='--',color='black',
draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
clevs = np.linspace(vmin,vmax,29)
plt.contourf(longitude, latitude, np.squeeze(dataplt[Ls_ind,:,:]),
clevs, transform=ccrs.PlateCarree(),cmap="jet")
plt.title(r"Saturation ratio", size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label(r'NU',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
plt.show()
"""
Explanation: Map at a given $L_s$ of $p_{sat}$ for $H_2O$
End of explanation
"""
def psatco2(temp):
return 1.382 * 1e12 * np.exp(-3182.48/temp)
temp = np.linspace(100,200,81)
plt.yscale('log')
plt.plot(temp,psatco2(temp))
plt.show()
# User parameters -------------------------------------------------
earthtopo = False # add present-day coastlines
pco2atmo = 610. # CO2 pressure
vmin = 0.
vmax = 1.
# Code ------------------------------------------------------------
tsurfnc = data.variables['tsurf'][:,:,:]
dataplt = pco2atmo/psatco2(tsurfnc)
fig = plt.figure(figsize=(12,8))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_global()
if (earthtopo): ax.coastlines(resolution="110m",linewidth=1)
gl = ax.gridlines(linestyle='--',color='black',
draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
clevs = np.linspace(vmin,vmax,21)
plt.contourf(longitude, latitude, np.mean(dataplt[:,:,:],axis=0),
clevs, transform=ccrs.PlateCarree(),cmap="jet")
plt.title(r"Saturation ratio", size=14)
cb = plt.colorbar(ax=ax, orientation="vertical", pad=0.02, aspect=16, shrink=0.8)
cb.set_label(r'NU',size=12,rotation=0,labelpad=15)
cb.ax.tick_params(labelsize=10)
plt.show()
"""
Explanation: Time-averaged map of $p_{sat}$ for $CO_2$
End of explanation
"""
|
darrenxyli/deeplearning
|
projects/project2/dlnd_image_classification.ipynb
|
apache-2.0
|
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
"""
Explanation: Image Classification
In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.
Get the Data
Run the following cell to download the CIFAR-10 dataset for python.
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
"""
Explanation: Explore the Data
The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. Each batch contains the labels and images that are one of the following:
* airplane
* automobile
* bird
* cat
* deer
* dog
* frog
* horse
* ship
* truck
Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for an image and label pair in the batch.
Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions.
End of explanation
"""
def normalize(x):
"""
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
"""
# CIFAR-10 pixel values range from 0 to 255, so min-max scaling to [0, 1]
# reduces to dividing by the maximum possible pixel value
return np.array(x) / 255.0
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_normalize(normalize)
"""
Explanation: Implement Preprocess Functions
Normalize
In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x.
End of explanation
"""
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
def one_hot_encode(x):
"""
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
"""
lb.fit(range(10))
return lb.transform(x)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_one_hot_encode(one_hot_encode)
"""
Explanation: One-hot encode
Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, is a list of labels. Implement the function to return the list of labels as a one-hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function.
Hint: Don't reinvent the wheel.
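As a quick sanity check (illustrative only), a correct implementation should behave like this:
one_hot_encode([0, 3, 9])
# each returned row contains a single 1, at index 0, 3 and 9 respectively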
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
"""
Explanation: Randomize Data
As you saw from exploring the data above, the order of the samples is randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.
Preprocess all the data and save it
Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
import tensorflow as tf
def neural_net_image_input(image_shape):
"""
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
"""
return tf.placeholder(tf.float32, shape=[None, *image_shape], name='x')
def neural_net_label_input(n_classes):
"""
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
"""
return tf.placeholder(tf.float32, shape=[None, n_classes], name='y')
def neural_net_keep_prob_input():
"""
Return a Tensor for keep probability
: return: Tensor for keep probability.
"""
return tf.placeholder(tf.float32, name='keep_prob')
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
"""
Explanation: Build the network
For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.
Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction of layers, so it's easy to pick up.
However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d.
Let's begin!
Input
The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions
* Implement neural_net_image_input
* Return a TF Placeholder
* Set the shape using image_shape with batch size set to None.
* Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_label_input
* Return a TF Placeholder
* Set the shape using n_classes with batch size set to None.
* Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder.
* Implement neural_net_keep_prob_input
* Return a TF Placeholder for dropout keep probability.
* Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder.
These names will be used at the end of the project to load your saved model.
Note: None for shapes in TensorFlow allow for a dynamic size.
End of explanation
"""
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
"""
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
"""
weight = tf.Variable(tf.truncated_normal([*conv_ksize, x_tensor.shape[-1].value, conv_num_outputs], stddev=0.1))
bias = tf.Variable(tf.zeros(conv_num_outputs))
# Apply Convolution & Bias
conv_layer = tf.nn.bias_add(
tf.nn.conv2d(
x_tensor, weight, strides=[1, *conv_strides, 1], padding='SAME'
),
bias
)
# Apply Activation
conv_layer = tf.nn.relu(
conv_layer
)
# Apply Max Pooling
conv_layer = tf.nn.max_pool(
conv_layer, ksize=[1, *pool_ksize, 1], strides=[1, *pool_strides, 1], padding='SAME'
)
return conv_layer
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_con_pool(conv2d_maxpool)
"""
Explanation: Convolution and Max Pooling Layer
Convolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling:
* Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor.
* Apply a convolution to x_tensor using weight and conv_strides.
* We recommend you use same padding, but you're welcome to use any padding.
* Add bias
* Add a nonlinear activation to the convolution.
* Apply Max Pooling using pool_ksize and pool_strides.
* We recommend you use same padding, but you're welcome to use any padding.
Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers.
End of explanation
"""
def flatten(x_tensor):
"""
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
"""
x_shape = [dim.value for dim in x_tensor.shape[1:]]
return tf.reshape(
x_tensor, shape=[-1, np.product(x_shape)]
)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_flatten(flatten)
"""
Explanation: Flatten Layer
Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def fully_conn(x_tensor, num_outputs):
"""
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
weights = tf.Variable(tf.random_normal([x_tensor.shape[-1].value, num_outputs], stddev=0.1))
biases = tf.Variable(tf.random_normal([num_outputs], stddev=0.1))
# Apply weights and bias to tensor
layer = tf.nn.relu(
tf.add(
tf.matmul(x_tensor, weights),
biases
)
)
return layer
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_fully_conn(fully_conn)
"""
Explanation: Fully-Connected Layer
Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
End of explanation
"""
def output(x_tensor, num_outputs):
"""
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
"""
weights = tf.Variable(tf.random_normal([x_tensor.shape[-1].value, num_outputs], stddev=0.1))
biases = tf.Variable(tf.random_normal([num_outputs], stddev=0.1))
# Apply weights and bias to tensor
layer = tf.add(
tf.matmul(x_tensor, weights),
biases
)
return layer
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_output(output)
"""
Explanation: Output Layer
Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages.
Note: Activation, softmax, or cross entropy should not be applied to this.
End of explanation
"""
def conv_net(x, keep_prob):
"""
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
"""
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x = conv2d_maxpool(x, 32, (4,4), (1,1), (2,2), (2,2))
x = conv2d_maxpool(x, 64, (4,4), (1,1), (2,2), (2,2))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 256)
# TODO: Apply an Output Layer
# Set this to the number of classes
x = tf.nn.dropout(x, keep_prob)
# Function Definition from Above:
# output(x_tensor, num_outputs)
x = output(x, 10)
# TODO: return output
return x
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that it can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
"""
Explanation: Create Convolutional Model
Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model:
Apply 1, 2, or 3 Convolution and Max Pool layers
Apply a Flatten Layer
Apply 1, 2, or 3 Fully Connected Layers
Apply an Output Layer
Return the output
Apply TensorFlow's Dropout to one or more layers in the model using keep_prob.
End of explanation
"""
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
"""
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
"""
session.run(optimizer, feed_dict={ x: feature_batch, y: label_batch, keep_prob: keep_probability })
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_train_nn(train_neural_network)
"""
Explanation: Train the Neural Network
Single Optimization
Implement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following:
* x for image input
* y for labels
* keep_prob for keep probability for dropout
This function will be called for each batch, so tf.global_variables_initializer() has already been called.
Note: Nothing needs to be returned. This function is only optimizing the neural network.
End of explanation
"""
def print_stats(session, feature_batch, label_batch, cost, accuracy):
"""
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
"""
global valid_features, valid_labels
loss = session.run(
cost, feed_dict={ x: feature_batch, y: label_batch, keep_prob: 1.0 }
)
validation_accuracy = session.run(
accuracy, feed_dict={ x: valid_features, y: valid_labels, keep_prob: 1.0 }
)
print(' Loss: {:<8.3} Accuracy: {:<5.3}'.format(loss, validation_accuracy), end='\n')
"""
Explanation: Show Stats
Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. Use a keep probability of 1.0 to calculate the loss and validation accuracy.
End of explanation
"""
# TODO: Tune Parameters
epochs = 50
batch_size = 512
keep_probability = 0.75
"""
Explanation: Hyperparameters
Tune the following parameters:
* Set epochs to the number of iterations until the network stops learning or starts overfitting
* Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory:
* 64
* 128
* 256
* ...
* Set keep_probability to the probability of keeping a node using dropout
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
"""
Explanation: Train on a Single CIFAR-10 Batch
Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
"""
Explanation: Fully Train the Model
Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
"""
Test the saved model against the test dataset
"""
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
"""
Explanation: Checkpoint
The model has been saved to disk.
Test Model
Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/inpe/cmip6/models/sandbox-3/atmoschem.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-3', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: INPE
Source ID: SANDBOX-3
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:07
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
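For example (illustrative placeholder values only):
DOC.set_author("Jane Doe", "jane.doe@example.org")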
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmospheric chemistry grid match the atmosphere grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces, thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species are scavenged from the atmosphere by precipitation, thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces, thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
atulsingh0/MachineLearning
|
MasteringML_wSkLearn/02b_Classification.ipynb
|
gpl-3.0
|
# Imports needed by the cells below
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_boston
name = ['Quality','Alcohol','Malic acid', 'Ash', 'Alcalinity of ash ', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
wine = pd.read_csv("data/wine.data", names=name)
#print(wine.describe)
wine[:5]
# plotting the data
fig, ax = plt.subplots(figsize=(9,6))
X = wine['Alcohol']
y = wine['Quality']
ax.plot(X, y, 'b.')
ax.set_xlabel("Alcohol")
ax.set_ylabel("Quality")
ax.margins(0.2)
ax.grid(True)
lReg = LinearRegression()
X = wine[list(wine.columns)[1:]]
y = wine['Quality']
#print (X[:2])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=27)
# fitting the data
lReg.fit(X_train, y_train)
# predicting
y_pred = lReg.predict(X_test)
print('R-squared:', lReg.score(X_test, y_test))
# cross validation scores
scores = cross_val_score(lReg, X_train, y_train, cv=5)
print("Scores",scores)
print("Mean Score",np.mean(scores))
plt.scatter(y_test, y_pred)
plt.xlabel("Quality")
plt.ylabel("Predicted Quality")
"""
Explanation: Explore the WineQuality data and apply Linear Regression
End of explanation
"""
boston = load_boston()
# splitting the data
X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, test_size=0.2, random_state=27)
#
X_scaler = StandardScaler()
y_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
y_train = y_scaler.fit_transform(y_train.reshape(-1, 1)).ravel()  # StandardScaler expects 2D input; flatten back for the estimators
X_test = X_scaler.transform(X_test)
y_test = y_scaler.transform(y_test.reshape(-1, 1)).ravel()
regressor = SGDRegressor(loss='squared_loss')  # this loss is named 'squared_error' in scikit-learn >= 1.2
scores = cross_val_score(regressor, X_train, y_train, cv=5)
print ('Cross validation r-squared scores:', scores)
print ('Average cross validation r-squared score:', np.mean(scores))
regressor.fit(X_train, y_train)
print ('Test set r-squared score', regressor.score(X_test, y_test))
"""
Explanation: Fitting models with gradient descent
There are two varieties of gradient descent that are distinguished by the number
of training instances that are used to update the model parameters in each training
iteration. Batch gradient descent, which is sometimes called only gradient descent,
uses all of the training instances to update the model parameters in each iteration.
Stochastic Gradient Descent (SGD), in contrast, updates the parameters using
only a single training instance in each iteration. The training instance is usually
selected randomly. Stochastic gradient descent is often preferred to optimize cost
functions when there are hundreds of thousands of training instances or more, as
it will converge more quickly than batch gradient descent. Batch gradient descent
is a deterministic algorithm, and will produce the same parameter values given the
same training set.
As a stochastic algorithm, SGD can produce different parameter
estimates each time it is run. SGD may not minimize the cost function as well as
gradient descent because it uses only a single training instance to update the weights.
Its approximation is often close enough, particularly for convex cost functions such
as residual sum of squares.
End of explanation
"""
|
cipang/hello-world
|
Welcome_To_Colaboratory.ipynb
|
gpl-2.0
|
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
"""
Explanation: <a href="https://colab.research.google.com/github/cipang/hello-world/blob/master/Welcome_To_Colaboratory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<p><img alt="Colaboratory logo" height="45px" src="/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px"></p>
<h1>What is Colaboratory?</h1>
Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with
- Zero configuration required
- Free access to GPUs
- Easy sharing
Whether you're a student, a data scientist or an AI researcher, Colab can make your work easier. Watch Introduction to Colab to learn more, or just get started below!
Getting started
The document you are reading is not a static web page, but an interactive environment called a Colab notebook that lets you write and execute code.
For example, here is a code cell with a short Python script that computes a value, stores it in a variable, and prints the result:
End of explanation
"""
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
"""
Explanation: To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.
Variables that you define in one cell can later be used in other cells:
End of explanation
"""
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
"""
Explanation: Colab notebooks allow you to combine executable code and rich text in a single document, along with images, HTML, LaTeX and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see Overview of Colab. To create a new Colab notebook you can use the File menu above, or use the following link: create a new Colab notebook.
Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see jupyter.org.
Data science
With Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses numpy to generate some random data, and uses matplotlib to visualize it. To edit the code, just click the cell and start editing.
End of explanation
"""
|
Echelle/AO_bonding_paper
|
notebooks/SiGaps_12_VG12_twoGaps.ipynb
|
mit
|
%pylab inline
import emcee
import triangle  # corner-plot package, later renamed to "corner"
import pandas as pd
import seaborn as sns
from astroML.decorators import pickle_results
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5})
sns.set(style="ticks")
"""
Explanation: This IPython Notebook is for performing a fit and generating a figure of the spectrum of sample VG12, in the mesh region with 49+/-6 nm gap. This version is modified to fit for two gaps.
The filename of the figure is [TBD].pdf.
Author: Michael Gully-Santiago, gully@astro.as.utexas.edu
Date: January 25, 2015
End of explanation
"""
df = pd.read_csv('../data/cln_20130916_cary5000.csv', index_col=0)
df = df[df.index > 1250.0]
plt.plot(df.index[::4], df.run11[::4]/100.0, label='On-mesh')
plt.plot(df.index, df.run10/100.0, label='Off-mesh')
plt.plot(df.index, df.run12/100.0, label='Shard2')
plt.plot(df.index, df.run9/100.0, label='DSP')
plt.plot(df.index, df.run15/100.0, label='VG08')
plt.plot(df.index, df.run17/100.0, label='VG08 alt')
#plt.plot(x, T_gap_Si_withFF_fast(x, 65.0, 0.5, n1)/T_DSP, label='Model')
plt.legend(loc='best')
plt.ylim(0.80, 1.05)
"""
Explanation: Read in the data. We want "VG12"
End of explanation
"""
from etalon import *
np.random.seed(78704)
# Introduce the Real data, decimate the data.
x = df.index.values[::4]
N = len(x)
# Define T_DSP for the model
T_DSP = T_gap_Si(x, 0.0)
n1 = sellmeier_Si(x)
# Define uncertainty
yerr = 0.0004*np.ones(N)
iid_cov = np.diag(yerr ** 2)
# Select the spectrum of interest
# Normalize the spectrum by measured DSP Si wafer.
y = df.run11.values[::4]/100.0
"""
Explanation: Import all the local models, saved locally as etalon.py. See the paper for derivations of these equations.
End of explanation
"""
def lnlike(dM, eps, lna, lns):
a, s = np.exp(lna), np.exp(lns)
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
sgn, logdet = np.linalg.slogdet(C)
if sgn <= 0:
return -np.inf
T_mix = 0.5 * (T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1) + T_gap_Si_withFF_fast(x, eps, 1.0, n1))/T_DSP
r = y - T_mix
return -0.5 * (np.dot(r, np.linalg.solve(C, r)) + logdet)
"""
Explanation: Define the likelihood. In this case we are using two different gap sizes, but fixed fill factor.
\begin{equation}
T_{mix} = 0.5 \times T_{e}(d_M + \epsilon) + 0.5 \times T_{e}(\epsilon)
\end{equation}
End of explanation
"""
def lnprior(dM, eps, lna, lns):
prior = -0.5 * ((49.0-dM)/6.0)**2.0
if not (31.0 < dM < 67 and 0.0 < eps < 60.0 and -12 < lna < -2 and 0 < lns < 10):
return -np.inf
return prior
"""
Explanation: Define the prior. We want to put a Normal prior on $d_M$:
$d_M \sim \mathcal{N}(\hat{d_M}, \sigma_{d_M})$
End of explanation
"""
def lnprob(p):
lp = lnprior(*p)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(*p)
"""
Explanation: Combine likelihood and prior to obtain the posterior.
End of explanation
"""
@pickle_results('SiGaps_12_VG12_twoGaps-sampler.pkl')
def hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials):
# Initialize the walkers
p0 = np.array([dM_Guess, eps_Guess, np.log(a_Guess), np.log(s_Guess)])
pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
pos, lp, state = sampler.run_mcmc(pos, nburnins)
sampler.reset()
pos, lp, state = sampler.run_mcmc(pos, ntrials)
return sampler
"""
Explanation: Set up emcee.
End of explanation
"""
np.random.seed(78704)
ndim, nwalkers = 4, 32
dM_Guess = 49.0
eps_Guess = 15.0
a_Guess = 0.0016
s_Guess = 25.0
nburnins = 200
ntrials = 700
"""
Explanation: Set up the initial conditions
End of explanation
"""
sampler = hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials)
"""
Explanation: Run the burn-in phase. Run the full MCMC. Pickle the results.
End of explanation
"""
chain = sampler.chain
samples_lin = copy(sampler.flatchain)
samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])
"""
Explanation: Linearize $a$ and $s$ for easy inspection of the values.
End of explanation
"""
fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True)
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98,
wspace=0.0, hspace=0.05)
[a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5)
for i, a in enumerate(axes)]
[a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d_M", "\epsilon", "\ln a", "\ln s"])]
axes[-1].set_xlim(0, chain.shape[1])
axes[-1].set_xlabel("iteration");
"""
Explanation: Inspect the chain.
End of explanation
"""
fig = triangle.corner(samples_lin,
labels=map("${0}$".format, ["d_M", "\epsilon", "a", "s"]),
quantiles=[0.16, 0.84])
fig = triangle.corner(samples_lin[:,0:2],
labels=map("${0}$".format, ["d_M", "\epsilon"]),
quantiles=[0.16, 0.84])
plt.savefig("VG12_twoGaps_cornerb.pdf")
"""
Explanation: Linearize $a$ and $s$ for graphical purposes.
Make a triangle corner plot.
End of explanation
"""
dM_mcmc, eps_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples_lin, [16, 50, 84],
axis=0)))
dM_mcmc, eps_mcmc, a_mcmc, s_mcmc
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*dM_mcmc)
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*eps_mcmc)
"""
Explanation: Calculate confidence intervals.
End of explanation
"""
plt.figure(figsize=(6,3))
for dM, eps, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
C = iid_cov + off_diag_terms
fit = 0.5*(T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1)+T_gap_Si_withFF_fast(x, eps, 1.0, n1))/T_DSP
vec = np.random.multivariate_normal(fit, C)
plt.plot(x, vec,"-b", alpha=0.06)
plt.step(x, y,color="k", label='Measurement')
fit = 0.5*(T_gap_Si_withFF_fast(x, dM_mcmc[0]+eps_mcmc[0], 1, n1)+T_gap_Si_withFF_fast(x, eps_mcmc[0], 1, n1))/T_DSP
fit_label = 'Model with $d_M={:.0f}$ nm, $\epsilon={:.0f}$'.format(dM_mcmc[0], eps_mcmc[0])
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
fit1 = T_gap_Si_withFF_fast(x, 43, 0.5, n1)/T_DSP
fit2 = T_gap_Si_withFF_fast(x, 55, 0.5, n1)/T_DSP
fit2_label = 'Model with $d_M={:.0f}\pm{:.0f}$ nm, $\epsilon={:.0f}$'.format(49, 6, 0)
plt.fill_between(x, fit1, fit2, alpha=0.6, color=sns.xkcd_rgb["green apple"])
plt.plot([-10, -9], [-10, -9],"-", alpha=0.85, color=sns.xkcd_rgb["green apple"], label=fit2_label)
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 2501);
plt.ylim(0.9, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG12_twoGapsb.pdf", bbox_inches='tight')
"""
Explanation: Overlay draws from the Gaussian Process.
End of explanation
"""
|