repo_name | path | license | content |
---|---|---|---|
learn1do1/learn1do1.github.io | python_notebooks/Sorting Revisited.ipynb | mit |
import random
cards = list(range(52))  # a deck of 52 "cards", represented as the integers 0-51
random.shuffle(cards)
print(cards)
"""
Explanation: Sorting functions in Python
Python is useful for exploring algorithms because of its terseness and large set of libraries.
This post focuses on sorting functions, using a shuffled deck of cards (integers) as input and measuring their runtime.
End of explanation
"""
def assert_sorted(cards):
    # Each card must be >= the one before it (cards are non-negative integers).
    previous = -1
    for card in cards:
        if card < previous:
            raise AssertionError('Sort Failed')
        previous = card
    return True
"""
Explanation: Before we begin, let's define a function for checking that our sorting function even works. We can call it assert_sorted:
End of explanation
"""
def selection_sort(cards):
    result = []  # grows one card at a time, always in sorted order
    search_space = list(cards)
    while len(result) < len(cards):
        # Find the smallest remaining card and move it to the end of result.
        current_min_index = 0
        for i in range(len(search_space)):
            if search_space[i] < search_space[current_min_index]:
                current_min_index = i
        result.append(search_space.pop(current_min_index))
    return result
print(selection_sort(cards))
"""
Explanation: Selection Sort
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
import time
sizes = [x for x in range(2000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
start_time = time.time()
sorted_cards = selection_sort(cards)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run selection sort based on input size')
plt.show()
"""
Explanation: Let's look at the performance of selection sort. I looked into cProfile as a way to profile, but I only needed timing information, not the per-function breakdown cProfile gives. So I decided instead to use a very simple way of measuring execution time: the Python time library.
I can then plot the runtime in seconds as the number of cards I input grows.
End of explanation
"""
def insertion_sort(cards):
    result = []
    for candidate in cards:
        # By default, insert at the end of the sorted list...
        index = len(result)
        # ...unless we find an element the candidate should go before.
        for i in range(len(result)):
            if candidate < result[i]:
                index = i
                break
        result.insert(index, candidate)
    return result
sizes = [x for x in range(2000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
start_time = time.time()
sorted_cards = insertion_sort(cards)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run insertion sort based on input size')
plt.show()
"""
Explanation: Insertion Sort
End of explanation
"""
import bintrees
def insertion_sort_with_trees(cards, tree):
for candidate in cards:
# by default, insert only the key into the bintree. Value is None
tree.insert(candidate, None)
return [x for x in tree.keys()]
sizes = [x for x in range(2000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
redblack_tree = bintrees.RBTree()
start_time = time.time()
sorted_cards = insertion_sort_with_trees(cards, redblack_tree)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run tree-based insertion sort based on input size')
plt.show()
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
binaryTree = bintrees.BinaryTree()
start_time = time.time()
sorted_cards = insertion_sort_with_trees(cards, binaryTree)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
plt.plot(sizes, durations, color = 'yellow')
"""
Explanation: Insertion sort with a tree structure
This is how non-computer scientist friends of mine describe how they like to sort their cards. It's insertion sort, but we store the sorted cards in a tree so that inserts can happen in log(N) instead of N.
End of explanation
"""
sizes = [x for x in range(2000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))  # no shuffle: already-sorted input
redblack_tree = bintrees.RBTree()
start_time = time.time()
sorted_cards = insertion_sort_with_trees(cards, redblack_tree)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run tree-based insertion sort on already-sorted input')
plt.show()
durations = []
for size in sizes:
    cards = list(range(size))  # already-sorted input: worst case for the unbalanced tree
binaryTree = bintrees.BinaryTree()
start_time = time.time()
sorted_cards = insertion_sort_with_trees(cards, binaryTree)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
plt.plot(sizes, durations, color = 'yellow')
"""
Explanation: Downside of a plain binary tree: it performs well in the best case, but because it isn't balanced, its worst-case performance (for example the already-sorted input above) takes you back to performance as bad as insertion sort, O(N^2).
End of explanation
"""
def quicksort(cards):
    if len(cards) < 2:
        return cards
    # A random pivot keeps the expected recursion depth at O(log N).
    pivot = random.randint(0, len(cards) - 1)
    # Note: the pivot element itself stays in upper_half, so an unlucky pivot
    # (the minimum) just re-partitions the same list with a fresh random pivot.
    upper_half = [x for x in cards if x >= cards[pivot]]
    lower_half = [x for x in cards if x < cards[pivot]]
    return quicksort(lower_half) + quicksort(upper_half)
sizes = [x for x in range(10000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
start_time = time.time()
sorted_cards = quicksort(cards)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run quick sort based on input size')
plt.show()
"""
Explanation: You can see that because the red-black tree remains balanced, insertion into it stays stable (blue line), whereas the unbalanced binary tree (yellow line) shows O(N^2) performance. It looks very similar to our insertion sort from earlier, because we have lost the benefit of storing the sorted values in a tree.
Quicksort
End of explanation
"""
def mergesort(cards):
    if len(cards) <= 1:
        return cards
    else:
        mid = len(cards) // 2
        return merge(mergesort(cards[:mid]), mergesort(cards[mid:]))
def merge(list1, list2):
    # Repeatedly take the smaller front element of the two sorted lists.
    final_list = []
    i = 0
    j = 0
    while len(final_list) != len(list1) + len(list2):
        if i == len(list1):
            final_list.append(list2[j])
            j = j + 1
        elif j == len(list2) or list1[i] < list2[j]:
            final_list.append(list1[i])
            i = i + 1
        else:  # list1[i] >= list2[j]
            final_list.append(list2[j])
            j = j + 1
    return final_list
sizes = [x for x in range(10000) if x % 100 == 0]
durations = []
for size in sizes:
    cards = list(range(size))
random.shuffle(cards)
start_time = time.time()
sorted_cards = mergesort(cards)
durations.append(time.time() - start_time)
assert_sorted(sorted_cards)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(sizes, durations)
plt.title('Time in seconds to run mergesort based on input size')
plt.show()
"""
Explanation: Mergesort
Mergesort is similar to quicksort in that it uses recursion to get away with only O(N log N) comparisons. First divide the list down to single elements, then repeatedly merge adjacent sorted sublists back together.
I found that the code for mergesort is a little more complex than I would like, so I have taken this gif from Wikipedia to demonstrate what's really going on here:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/niwa/cmip6/models/sandbox-1/landice.ipynb | gpl-3.0 |
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'niwa', 'sandbox-1', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: NIWA
Source ID: SANDBOX-1
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:30
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the land ice model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of the land ice model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adaptive grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calculated. Include the temporal coupling frequency from the atmosphere, whether or not a separate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of calving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
kwinkunks/rainbow | notebooks/Guessing_colourmaps_TSP problem.ipynb | apache-2.0 |
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib.pyplot import imread
from scipy import signal
nx, ny = 100, 100
z = np.random.rand(nx, ny)
sizex, sizey = 30, 30
x, y = np.mgrid[-sizex:sizex+1, -sizey:sizey+1]
g = np.exp(-0.333*(x**2/float(sizex)+y**2/float(sizey)))
f = g/g.sum()
z = signal.convolve(z, f, mode='valid')
z = (z - z.min())/(z.max() - z.min())
cd /home/matt/Dropbox/dev/notebooks
# Note: interpolation introduces new colours.
plt.imshow(z, cmap="spectral")
#plt.imshow(z, cmap="spectral", interpolation='none')
plt.axis('off')
plt.savefig('data/cbar/test.png', bbox_inches='tight')
plt.show()
"""
Explanation: TSP problem
See the other notebook for the grisly details and dead-ends.
Requirements:
numpy
scipy
scikit-learn
I recommend installing them with conda install.
End of explanation
"""
cd /home/matt/Dropbox/dev/notebooks
from PIL import Image
img = Image.open('data/cbar/test.png')
#img = Image.open('data/cbar/redblu.png')
img
"""
Explanation: Read an image
End of explanation
"""
n_colours = 128
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
im = np.asarray(img) / 255
h, w, d = im.shape
im_ = im.reshape((w * h, d))[:, :3]
sample = shuffle(im_, random_state=0)[:2000] # Defines training set size
"""
Explanation: Quantize with scikit
End of explanation
"""
kmeans = KMeans(n_clusters=n_colours, random_state=0).fit(sample)
"""
Explanation: Train:
End of explanation
"""
p = kmeans.cluster_centers_
# I don't know why I need to do this, but I do. Floating point precision maybe.
p[p > 1] = 1
p[p < 0] = 0
"""
Explanation: Now I can make an RGB palette p — also known as a codebook in information theory terms:
End of explanation
"""
from mpl_toolkits.mplot3d import Axes3D
# Set up the figure
fig = plt.figure(figsize=(8, 8))
# Result of TSP solver
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*p.T, c=p, lw=0, s=40, alpha=1)
ax.plot(*p.T, color='k', alpha=0.4)
ax.set_title('Codebook')
plt.show()
"""
Explanation: The only problem with this p is that it is not in order — that is, the cluster centres are more or less randomly arranged. We will fix that in the next section.
The vector p is actually all we need, but if you want to see what the quantized image looks like, carry on:
Travelling salesman problem
Remember that these points are essentially in random order:
End of explanation
"""
from pytsp import run, dumps_matrix
"""
Explanation: I propose starting at the dark end (the end of the line nearest black) and crawling along the line of points from there. This will make a nice organized sequence of codes — in our case, this will be the colourmap.
We can solve this problem as the travelling salesman problem. Find the shortest tour from black to 'the other end'.
To start with, we need the distances between all points. This is just a norm, but there's a convenient function scipy.spatial.pdist for finding distance pairs in n-space. Then squareform casts it into a square symmetric matrix, which is what we need for our TSP solver.
Other than creating a naive TSP solver in Python – let's face it, it'll be broken or slow or non-optimal — there are three good TSP solver options:
LKH — parameter help here.
Concorde (I followed these instructions for installing concorde on my Mac.)
O-R tools
LKH and Concorde can be used via the TSP Python package (but note that it used to be called pyconcorde, so you need to change the names of some functions — look at the source or use my fork).
Note that you need to add the Concorde and LKH libs to PATH as mentioned in the docs for pytsp.
End of explanation
"""
p = np.vstack([[[0,0,0]], p])
p[:6]
from scipy.spatial.distance import pdist, squareform
# Make distance matrix.
dists = squareform(pdist(p, 'euclidean'))
# Scale into the int16 range (the maximum possible RGB distance is sqrt(3)).
d = 32767 * dists / np.sqrt(3)
d = d.astype(np.int16)
# Append a dummy node at zero distance from everything, so the closed TSP tour
# can effectively be cut open into a path.
sz = d.shape[1]
dss = np.vstack([d, np.zeros(sz)])
dsss = np.hstack([dss, np.expand_dims(np.zeros(sz+1), 1)])
"""
Explanation: Add black to p (so the tour has a well-defined dark starting point):
End of explanation
"""
outf = "/tmp/myroute_concorde.tsp"
with open(outf, 'w') as f:
f.write(dumps_matrix(dsss, name="My Route"))
# tour_concorde = run(outf, start=0, solver="Concorde")
"""
Explanation: Concorde algorithm — a little slower than LKH:
End of explanation
"""
outf = "/tmp/myroute_lkh.tsp"
with open(outf, 'w') as f:
f.write(dumps_matrix(dsss, name="My Route"))
tour_lkh = run(outf, start=0, solver="LKH")
#result = np.array(tour_concorde['tour'])
result = np.array(tour_lkh['tour'])
result
# few = result[-8:]
# np.random.shuffle(few)
# print(few)
# few_codez = p[few]
# print(few_codes)
# few_codes = np.vstack([[[0,0,0]], few_codez])
# # Make distacnce matrix.
# dist = squareform(pdist(few_codes, 'euclidean'))
# # Make integers
# ds = 32767 * dist / np.sqrt(3)
# ds = ds.astype(np.int16)
# sz = ds.shape[1]
# dss = np.vstack([ds, np.zeros(sz)])
# dsss = np.hstack([dss, np.expand_dims(np.zeros(sz+1), 1)])
# outf = "/tmp/myroute_lkh.tsp"
# with open(outf, 'w') as f:
# f.write(dumps_matrix(dsss, name="My Route"))
# tour_lkh = run(outf, start=0, solver="LKH")
# points = np.array(tour_lkh['tour'])
# cs = few_codes[points[1:-1]]
# fig = plt.figure(figsize=(8, 8))
# # Result of TSP solver
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(*cs.T, c=c, lw=0, s=40, alpha=1)
# ax.plot(*cs.T, color='k', alpha=0.4)
# ax.set_title('TSP solver')
# plt.show()
result
"""
Explanation: LKH implementation:
End of explanation
"""
c = p[result[1:-1]]
"""
Explanation: Now result holds the indices of the points along the shortest path, and p is our quantized colour palette with one RGB row per point. So we can select the points easily for an ordered colourmap.
The [1:-1] offset accounts for the black point we added at the start and for the dummy node appended to the distance matrix.
End of explanation
"""
from mpl_toolkits.mplot3d import Axes3D
# Set up the figure
fig = plt.figure(figsize=(8, 8))
# Result of TSP solver
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*c.T, c=c, lw=0, s=40, alpha=1)
ax.plot(*c.T, color='k', alpha=0.4)
ax.set_title('TSP solver')
plt.show()
"""
Explanation: Ideally I'd like all the distances too, but it wouldn't be too hard to compute these.
Now let's look at it all.
End of explanation
"""
import plotly.graph_objs as go
import colorlover as cl
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cb = cl.to_rgb(tuple(map(tuple, c*255)))
trace = go.Scatter3d(
name='TSP Solver',
x = c[:,0], y = c[:,1], z = c[:,2],
marker = dict(
size=4.,
color=cb
),
line=dict(
color='#000',
width=1,
),
)
data = [trace]
# Set the different layout properties of the figure:
layout = go.Layout(
autosize=False,
width=600,
height=600,
margin = dict(
t=0,b=0,l=0,r=0
),
scene = go.Scene(
xaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
yaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
zaxis=dict(
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
aspectmode='cube',
camera=dict(
eye=dict(
x=1.7,
y=-1.7,
z=1,
)
),
)
)
fig = go.Figure(data=data, layout=layout)
iplot(fig, show_link=False)
from scipy.spatial import cKDTree
kdtree = cKDTree(c)
dx, ix = kdtree.query(im_)
plt.imshow(ix.reshape((h, w)), cmap='RdBu')
plt.colorbar()
plt.show()
plt.imshow(dx.reshape((h, w)))
plt.colorbar()
plt.show()
fig = plt.figure(figsize=(18, 5))
ax0 = fig.add_subplot(131)
plt.imshow(im, interpolation='none')
ax1 = fig.add_subplot(132, projection='3d')
ax1.scatter(*c.T, c=c, lw=0, s=40, alpha=1)
ax1.plot(*c.T, color='k', alpha=0.5)
ax1.text(*c[0], ' start')
ax1.text(*c[-1], ' end')
ax2 = fig.add_subplot(133)
plt.imshow(ix.reshape((h, w)), cmap="RdBu", interpolation='none')
plt.colorbar(shrink=0.75)
plt.show()
cmaps = [('Perceptually Uniform Sequential',
['viridis', 'inferno', 'plasma', 'magma']),
('Sequential', ['Blues', 'BuGn', 'BuPu',
'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool',
'copper', 'gist_heat', 'gray', 'hot',
'pink', 'spring', 'summer', 'winter']),
('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'seismic']),
('Qualitative', ['Accent', 'Dark2', 'Paired', 'Pastel1',
'Pastel2', 'Set1', 'Set2', 'Set3']),
('Miscellaneous', ['gist_earth', 'terrain', 'ocean', 'gist_stern',
'brg', 'CMRmap', 'cubehelix',
'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow',
'gist_rainbow', 'hsv', 'flag', 'prism'])]
"""
Explanation: Below is an interactive version of the 3D plot; it may help when there are complicated paths between points. You need to install plotly and colorlover (with pip) if you don't already have them.
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive/08_image_keras/mnist_models.ipynb | apache-2.0 |
import os
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
MODEL_TYPE = "dnn" # "linear", "dnn", "dnn_dropout", or "cnn"
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["MODEL_TYPE"] = MODEL_TYPE
os.environ["TFVERSION"] = "1.13" # Tensorflow version
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
"""
Explanation: MNIST Image Classification with TensorFlow on Cloud ML Engine
This notebook demonstrates how to implement different image models on MNIST using Estimator.
Note the MODEL_TYPE; change it to try out different models
End of explanation
"""
%%bash
rm -rf mnistmodel.tar.gz mnist_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/mnistmodel/trainer \
-- \
--output_dir=${PWD}/mnist_trained \
--train_steps=100 \
--learning_rate=0.01 \
--model=$MODEL_TYPE
"""
Explanation: Run as a Python module
In the previous notebook (mnist_linear.ipynb) we ran our code directly from the notebook.
Now since we want to run our code on Cloud ML Engine, we've packaged it as a python module.
The model.py and task.py files containing the model code are in <a href="mnistmodel/trainer">mnistmodel/trainer</a>
Let's first run it locally for a few steps to test the code.
End of explanation
"""
%%bash
OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE}
JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/mnistmodel/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--output_dir=$OUTDIR \
--train_steps=10000 --learning_rate=0.01 --train_batch_size=512 \
--model=$MODEL_TYPE --batch_norm
"""
Explanation: Now, let's do it on Cloud ML Engine so we can train on GPU (--scale-tier=BASIC_GPU)
Note that the GPU speed-up depends on the model type. You'll notice the more complex CNN model trains significantly faster on a GPU; however, the speed-up on the simpler models is not as pronounced.
End of explanation
"""
from google.datalab.ml import TensorBoard
TensorBoard().start("gs://{}/mnist/trained_{}".format(BUCKET, MODEL_TYPE))
for pid in TensorBoard.list()["pid"]:
TensorBoard().stop(pid)
print("Stopped TensorBoard with pid {}".format(pid))
"""
Explanation: Monitoring training with TensorBoard
Use this cell to launch tensorboard
End of explanation
"""
%%bash
MODEL_NAME="mnist"
MODEL_VERSION=${MODEL_TYPE}
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/mnist/trained_${MODEL_TYPE}/export/exporter | tail -1)
echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION ... this will take a few minutes"
#gcloud ml-engine versions delete ${MODEL_VERSION} --model ${MODEL_NAME}
#gcloud ml-engine models delete ${MODEL_NAME}
gcloud ml-engine models create ${MODEL_NAME} --regions $REGION
gcloud ml-engine versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version=$TFVERSION
"""
Explanation: Here's what it looks like with a linear model for 10,000 steps:
<img src="images/eval_linear_10000.png" width="60%"/>
Here are my results:
Model | Accuracy | Time taken | Model description | Run time parameters
--- | :---: | --- | --- | ---
linear | 91.53 | 3 min | | 100 steps, LR=0.01, Batch=512
linear | 92.73 | 8 min | | 1000 steps, LR=0.01, Batch=512
linear | 92.29 | 18 min | | 10000 steps, LR=0.01, Batch=512
dnn | 98.14 | 15 min | 300-100-30 nodes fully connected | 10000 steps, LR=0.01, Batch=512
dnn | 97.99 | 48 min | 300-100-30 nodes fully connected | 100000 steps, LR=0.01, Batch=512
dnn_dropout | 97.84 | 29 min | 300-100-30-DL(0.1)- nodes | 20000 steps, LR=0.01, Batch=512
cnn | 98.97 | 35 min | maxpool(10 5x5 cnn, 2)-maxpool(20 5x5 cnn, 2)-300-DL(0.25) | 20000 steps, LR=0.01, Batch=512
cnn | 98.93 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25) | 20000 steps, LR=0.01, Batch=512
cnn | 99.17 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25), batch_norm (logits only) | 20000 steps, LR=0.01, Batch=512
cnn | 99.27 | 35 min | maxpool(10 11x11 cnn, 2)-maxpool(20 3x3 cnn, 2)-300-DL(0.25), batch_norm (logits, deep) | 10000 steps, LR=0.01, Batch=512
cnn | 99.48 | 12 hr | as-above but nfil1=20, nfil2=27, dprob=0.1, lr=0.001, batchsize=233 | (hyperparameter optimization)
Deploying and predicting with model
Deploy the model:
End of explanation
"""
import json, codecs
import matplotlib.pyplot as plt
import tensorflow as tf
HEIGHT = 28
WIDTH = 28
# Get mnist data
mnist = tf.keras.datasets.mnist
(_, _), (x_test, _) = mnist.load_data()
# Scale our features between 0 and 1
x_test = x_test / 255.0
IMGNO = 5 # CHANGE THIS to get different images
jsondata = {"image": x_test[IMGNO].reshape(HEIGHT, WIDTH).tolist()}
json.dump(jsondata, codecs.open("test.json", 'w', encoding = "utf-8"))
plt.imshow(x_test[IMGNO].reshape(HEIGHT, WIDTH));
"""
Explanation: To predict with the model, let's take one of the example images.
End of explanation
"""
%%bash
gcloud ml-engine predict \
--model=mnist \
--version=${MODEL_TYPE} \
--json-instances=./test.json
"""
Explanation: Send it to the prediction service
End of explanation
"""
trainingInput:
scaleTier: CUSTOM
masterType: complex_model_m_gpu
hyperparameters:
goal: MAXIMIZE
maxTrials: 30
maxParallelTrials: 2
hyperparameterMetricTag: accuracy
params:
- parameterName: train_batch_size
type: INTEGER
minValue: 32
maxValue: 512
scaleType: UNIT_LINEAR_SCALE
- parameterName: learning_rate
type: DOUBLE
minValue: 0.001
maxValue: 0.1
scaleType: UNIT_LOG_SCALE
- parameterName: nfil1
type: INTEGER
minValue: 5
maxValue: 20
scaleType: UNIT_LINEAR_SCALE
- parameterName: nfil2
type: INTEGER
minValue: 10
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
- parameterName: dprob
type: DOUBLE
minValue: 0.1
maxValue: 0.6
scaleType: UNIT_LINEAR_SCALE
"""
Explanation: DO NOT RUN anything beyond this point
This shows you what I did, but trying to repeat this will take several hours.
<br/>
Hyperparameter tuning
This is what hyperparam.yaml looked like:
End of explanation
"""
%%bash
OUTDIR=gs://${BUCKET}/mnist/trained_${MODEL_TYPE}_hparam
JOBNAME=mnist_${MODEL_TYPE}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/mnistmodel/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--runtime-version=$TFVERSION \
--config hyperparam.yaml \
-- \
--output_dir=$OUTDIR \
--model=$MODEL_TYPE --batch_norm
"""
Explanation: This takes <b>13 hours and 250 ML Units</b>, so don't try this at home :)
The key thing here is the --config parameter.
End of explanation
"""
|
ucsc-astro/coffee | 17_02_23_astropy_quantities/astropy_quantities.ipynb | gpl-3.0 |
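# Setup assumed by the cells below; the notebook's original import cell is not
# included in this excerpt, so the aliases here (u, c, np, plt) are inferred
# from how they are used later. The blackbody_nu/blackbody_lambda and texify
# helpers used further down also come from imports that are not shown.
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from astropy import units as u
from astropy import constants as c
from astropy.coordinates import SkyCoord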
print(type(u.Msun))
u.Msun
"""
Explanation: Astropy quantities
Astropy quantities are a great way to handle all sorts of messy unit conversions. Careful unit conversions save lives! https://en.wikipedia.org/wiki/Gimli_Glider
The simplest way to create a new quantity object is to multiply or divide a number by a Unit instance.
End of explanation
"""
mass = 1 * u.Msun
print(type(mass))
mass
# quantities subclass numpy ndarray, so you can handle arrays of quantities like you would
# any other array object
mass.__class__.__bases__
# we can convert units to other equivalent units
mass.to(u.kg)
# there are shortcuts for converting to the relevant system, regardless of what type of quantity it is
mass.cgs
mass.si
# we can inspect their unit and their numeric value with that unit
print(mass.value, mass.unit)
# calculations with quantities can produce quantities with new units
average_density = mass / (4 / 3 * np.pi * u.Rearth ** 3)
average_density.cgs
"""
Explanation: A brief word of warning: the pretty printing shown above takes longer than just printing out the values of an array. This is unnoticeable in this case, but becomes evident if you try to print a large array of quantities.
End of explanation
"""
# Newton's constant
c.G
# Planck's constant
c.h
# speed of light
c.c
"""
Explanation: Constants
Physical constants are found in the astropy.constants module, and work just like units.
End of explanation
"""
# made into a quantity array by multiplying numeric array by unit
R = np.linspace(1, 5) * u.Rearth
v = np.sqrt(2 * c.G * u.Msun / R)
print(v)
v = v.to(u.km / u.s)
print(v)
plt.plot(R, v)
plt.xlabel(r"Radius [R$_\oplus$]")
plt.ylabel(r"Escape velocity [km s$^{-1}$]")
"""
Explanation: Calculations with quantity arrays
End of explanation
"""
obscure_quantity = 42 * c.G * c.m_e ** 2 / c.k_B ** 2 * c.c ** 3 * (5700 * u.K) ** -2 * u.Msun / u.Mpc
obscure_quantity
# what the heck is a m^6 kg Msun / (Mpc J^2 s^5)??
obscure_quantity.decompose()
# will fail!
obscure_quantity.to(u.m)
# addition works for like units
(1 * u.m) + (1 * u.cm)
# and fails for the wrong dimensions
(1 * u.m) + (1 * u.s)
"""
Explanation: Quantities as sanity checks
End of explanation
"""
wavelengths = np.linspace(0.1, 1, 100) * u.micron
# will fail without the correct equivalency passed in!
frequencies = wavelengths.to(u.THz, equivalencies=u.spectral())
plt.plot(wavelengths, frequencies)
plt.xlabel(r"$\lambda$ [$\mu$m]")
plt.ylabel(r"$\nu$ [THz]")
"""
Explanation: Unit equivalencies
Equivalencies allow you to do unit conversions under certain physical assumptions. For instance, it makes sense to talk about converting wavelength to frequency when you are discussing the properties of light waves in a vacuum. See http://docs.astropy.org/en/stable/units/equivalencies.html#unit-equivalencies.
Spectral equivalence
End of explanation
"""
intensity_unit = blackbody_nu(wavelengths[0], temperature=1e3 * u.K).unit
wavelengths = np.logspace(-1, 1, 100) * u.micron
temperatures = np.linspace(5e3, 1e4, 5) * u.K
for T in temperatures:
plt.plot(wavelengths, blackbody_nu(wavelengths, temperature=T),
label='{:.2e}'.format(T))
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel(r'$\lambda$ [$\mu$m]')
plt.ylabel('$I_\\nu$ [{}]'.format(texify(intensity_unit)))
intensity_unit = blackbody_lambda(wavelengths[0], temperature=1e3 * u.K).unit
for T in temperatures:
plt.plot(wavelengths, blackbody_lambda(wavelengths, temperature=T),
label='{:.2e}'.format(T))
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel(r'$\lambda$ [$\mu$m]')
plt.ylabel('$I_\\lambda$ [{}]'.format(texify(intensity_unit)))
T = 1e4 * u.K
solid_angle = ((1 * u.arcsec) ** 2).to(u.sr)
f_nu = blackbody_nu(wavelengths, temperature=T) * solid_angle
f_lambda = blackbody_lambda(wavelengths, temperature=T) * solid_angle
print(f_nu.unit)
print(f_lambda.unit)
# I_nu.to(I_lambda.unit) # would fail
# for conversion of spectral energy density, we need to specify what part of the spectra we're looking at
f_lambda_converted = f_nu.to(f_lambda.unit, equivalencies=u.spectral_density(wavelengths))
print(f_lambda_converted.unit)
# shouldn't raise any exceptions!
assert np.all(np.isclose(f_lambda.value, f_lambda_converted.value))
"""
Explanation: Spectral energy density equivalencies
End of explanation
"""
# e.g., from the example above
np.isclose(f_lambda, f_lambda_converted)
"""
Explanation: Other cool equivalencies
Doppler shifts (for both radio velocities and optical velocities), dimensionless angles, parallax.
Words of warning
Quantity arrays will often break functions that aren't prepared for them. Simple numpy operations still work, but for more complicated routines you'll have to convert to the units you want and then take the underlying array with the quantity.value attribute.
End of explanation
"""
print((1 * u.m) / (2 * u.m), type((1 * u.m) / (2 * u.m)))
"""
Explanation: Even when you think you have a dimensionless array, it can still be a dimensionless quantity.
End of explanation
"""
@u.quantity_input(angle=u.arcsec, distance=u.Mpc)
def angle_to_size(angle, distance):
return angle.to(u.radian).value * distance
# this should work
angle_to_size(1 * u.arcsec, 25 * u.Mpc).to(u.kpc)
# quantity_input only checks for convertibility, not that it's the same unit
angle_to_size(1 * u.arcmin, 25 * u.Mpc).to(u.kpc)
# this should raise an error
angle_to_size(1 * u.m, 25 * u.Mpc)
"""
Explanation: Using units in your own code
You can use the decorator quantity_input as a clean way of ensuring your functions get the proper input.
End of explanation
"""
coord = SkyCoord(45, 30, unit=u.deg)
# ICRS is the reference frame
coord
# we can transform between coordinate frames
coord.fk4
coord.fk5
coord.galactic
# latitude and longitude are accessed with ra and dec (when in icrs or fk frames)
coord.ra
coord.dec
"""
Explanation: SkyCoord
The SkyCoord class, from astropy.coordinates, is a convenient way of dealing with astronomical coordinate systems.
http://docs.astropy.org/en/stable/coordinates/index.html
End of explanation
"""
print(coord.to_string())
print(coord.to_string('dms'))
print(coord.to_string('hmsdms'))
print(coord.to_string('hmsdms', sep=':'))
print(coord.to_string('hmsdms', sep=' '))
"""
Explanation: The attributes ra and dec are Angles. They are subclasses of Quantity, and so they behave similarly, but have more specific functionality. See http://docs.astropy.org/en/stable/coordinates/angles.html#working-with-angles for more details.
You can get nice string representations of angles for all your inane legacy software requirements.
End of explanation
"""
# need network connection
center_coord = SkyCoord.from_name('M31')
center_coord
# some mock coordinates
n = 500
ra_values = np.random.randn(n) + center_coord.ra.deg
dec_values = np.random.randn(n) + center_coord.dec.deg
coords = SkyCoord(ra_values, dec_values, unit=u.deg)
plt.scatter(coords.ra.deg, coords.dec.deg, s=100,
edgecolor='k', label='Parent sample')
plt.xlim(plt.xlim()[::-1]) # ra increases right to left
plt.xlabel("Right ascension [deg]")
plt.ylabel("Declination [deg]")
# mock measurements
n_sample = 100
astrometric_noise = 1 * u.arcsec
sample_indices = np.random.choice(np.arange(len(coords)), n_sample)
sample_ra = coords[sample_indices].ra.deg
sample_dec = coords[sample_indices].dec.deg
angles = 2 * np.pi * np.random.rand(n_sample)
dr = astrometric_noise.to(u.deg).value * np.random.randn(n_sample)
dx = np.cos(angles) * dr - np.sin(angles) * dr
dy = np.sin(angles) * dr + np.cos(angles) * dr
sample_coords = SkyCoord(sample_ra + dx, sample_dec + dy, unit=u.deg)
plt.scatter(coords.ra.deg, coords.dec.deg, s=100,
edgecolor='k', marker='o', alpha=0.8, label='Parent sample')
plt.scatter(sample_coords.ra.deg, sample_coords.dec.deg, s=100,
edgecolor='k', marker='v', alpha=0.8, label='Child sample')
plt.xlim(plt.xlim()[::-1]) # ra increases right to left
plt.xlabel("Right ascension [deg]")
plt.ylabel("Declination [deg]")
plt.legend(bbox_to_anchor=(1, 1))
# match_to_catalog_sky will return indices into coords of the closest matching objects,
# the angular separation, and the physical distance (ignored here)
idx, sep, dist = sample_coords.match_to_catalog_sky(coords)
ideal_sep = astrometric_noise.to(u.deg) * np.random.randn(int(1e6))
plt.hist(np.abs(ideal_sep.to(u.arcsec)), histtype='step', lw=4, bins='auto', normed=True, label='Ideal')
plt.hist(sep.arcsec, histtype='step', lw=4, bins='auto', normed=True, label='Data')
plt.xlim(0, 3)
plt.xlabel("Separation [arcsec]")
plt.legend(loc='best')
plt.scatter(coords[idx].ra.deg, coords[idx].dec.deg, s=100,
edgecolor='k', marker='o', alpha=0.8, label='Matched parent sample')
plt.scatter(sample_coords.ra.deg, sample_coords.dec.deg, s=100,
edgecolor='k', marker='v', alpha=0.8, label='Child sample')
plt.xlim(plt.xlim()[::-1]) # ra increases right to left
plt.xlabel("Right ascension [deg]")
plt.ylabel("Declination [deg]")
plt.legend(bbox_to_anchor=(1, 1))
"""
Explanation: Matching coordinates
There are lots of specific use cases outlined here, but let's go over a simple catalog matching exercise.
End of explanation
"""
|
rubensfernando/mba-analytics-big-data | Python/2016-08-05/aula6-parte5-recuperar.ipynb | mit |
import facebook
import simplejson as json
import requests
"""
Explanation: Retrieving posts
According to Graph API v2.3, we can retrieve:
/{user-id}/home - Returns the stream of all posts created by the user and their friends; this is what you normally find in the News Feed.
/{user-id}/feed – includes everything you see on your profile (shared links, check-ins, photos, status updates), as well as posts created by friends on the user's profile.
/{user-id}/statuses – Returns only the status updates posted by the user on their own profile.
/{user-id}/posts – returns the posts created by the user on their own wall or on friends' walls, and may include any content such as shared links, check-ins, photos, and status updates.
End of explanation
"""
req = requests.get('http://python.org')
req.status_code # If the code is 200, the request succeeded.
#req.text
'Python' in req.text
req.close()
"""
Explanation: The requests module is used to make HTTP requests; it will be useful for requesting additional pages of Facebook content.
It works as follows:
End of explanation
"""
import facebook
access_token = 'EAACEdEose0cBAAFGsk2U0Jo1Kn9GZCWuXoMwflMusq2ajIqwcF3VU9vEp2M8y5ZB3stHJ5AAShfQvL72JYV8zGQXGvN5996EXDn9FV2DJvS8FTflaak0UuC6ZAm5HPjI2gsLRVEzdYlftGL93ZBWNVJuWzRnENYY7a3dpALP0AZDZD'
api = facebook.GraphAPI(access_token, version='2.3')
noticias = api.get_object('me/feed')
#print(json.dumps(noticias, indent=4))
for item in range(0, len(noticias['data'])):
try:
print(item, '--->', noticias['data'][item]['story'])
except:
pass
noticias = api.get_object('me/feed')
while True:
try:
for item in range(0, len(noticias['data'])):
try:
print(item, '--->', noticias['data'][item]['story'])
except:
pass
noticias = requests.get(noticias['paging']['next']).json()
except Exception as e:
print(e)
break
"""
Explanation: 'me/feed'
Includes everything you see on your profile (shared links, check-ins, photos, status updates), as well as posts created by friends on the user's profile.
End of explanation
"""
feed_noticias = api.get_object('me/home')
len(feed_noticias['data'])
for item in range(0, len(feed_noticias['data'])):
try:
print(item, '---->', feed_noticias['data'][item]['name'])
except:
pass
feed_noticias['data'][1].keys()
feed_noticias['data'][1]['type']
feed_noticias['data'][1]['name']
#feed_noticias['data'][1]['application']
feed_noticias['data'][1]['updated_time']
feed_noticias['data'][1]['created_time']
# feed_noticias['data'][1]['comments']
feed_noticias['data'][1]['likes']
"""
Explanation: Exercise 2 - Modify the program from aula6-parte5-recuperar ('me/feed') and print, in addition to the story, the name, type and creation time.
- story
- name
- type
- created_time
'me/home'
Returns the stream of all posts created by the user and their friends; this is what you normally find in the News Feed.
End of explanation
"""
feed_noticias['paging']
"""
Explanation: Note that we only got 25 results back, yet our news feed still has a lot of information left to retrieve!
There is no dedicated parameter for saying how many items we want, so we have to build our own mechanism to stop the capture (a sketch follows below).
End of explanation
"""
access_token = 'EAACEdEose0cBANbSsjf7TIxMNSvoGZCOZCa2Nbfjso2ZAQwKCrmotS5wEXPZCNvHx7zxqLU30Mn5J9cJdtTDJtALuSfn4mSSmCuASZAPTQy2DE3LSeboUzfZCzTIvJPrcR1E4ScwKyDAPN2mOPx1hNwfKPyYdikzGc32tauaKGMQZDZD'
amigos = api.get_connections("me", "friends")
todos_amigos = []
while True:
try:
for amigo in amigos['data']:
todos_amigos.append(amigo['name'])
amigos = requests.get(amigos['paging']['next']).json()
except KeyError:
break
print(todos_amigos)
"""
Explanation: In the same way as before, we can request the next page until the desired amount is reached.
<p style="color: red">It is important to note that in my case I have few connections, so the amount of data is much smaller than for someone who uses Facebook actively!!!!</p>
Bonus - Friends list
We can also retrieve connections, for example a list of friends.
End of explanation
"""
|
inakic/matsoft | Sympy.ipynb | unlicense |
from sympy import *
"""
Explanation: Sympy
Sympy is a Python library for symbolic mathematics. Sympy's advantage is that it is written entirely in Python (which is sometimes also a drawback). Later in the course we will cover the much more powerful Sage, which is a CAS in the class of Mathematica and Maple. However, Sage is not a Python library but a CAS that uses Python as its programming language.
As with other libraries, using Sympy starts with an import.
End of explanation
"""
from sympy import init_printing
init_printing()
"""
Explanation: To get nice $\LaTeX$ output:
End of explanation
"""
from IPython.display import display
from ipywidgets import interact, fixed, interact_manual
import ipywidgets as widgets
"""
Explanation: We will also use interactive widgets, so we load them here.
End of explanation
"""
x = Symbol('x')
# or x,y,z = symbols('x,y,z')
# or from sympy.abc import x,y,z
# or var('x:z')
(pi + x)**2
a, b, c = symbols("stranica_a, stranica_b, stranica_c")
type(a)
a
a, b, c = symbols("alpha, beta, gamma")
a**2+b**2+c**2
symbols("x:5")
"""
Explanation: Symbolic variables
Since Sympy is just a Python package, we need to declare which symbols we will use as symbolic variables. We can do this in several ways:
End of explanation
"""
x = Symbol('x', real=True)
x.is_imaginary
x = Symbol('x', positive=True)
x > 0
"""
Explanation: We can also specify additional assumptions:
End of explanation
"""
f = Function('f')
f(0)
g = Function('g')(x)
g.diff(x), g.diff(a)
"""
Explanation: We can also create abstract functions:
End of explanation
"""
1+1*I
I**2
(x * I + 1)**2
"""
Explanation: Complex numbers
The imaginary unit is denoted by I.
End of explanation
"""
r1 = Rational(4,5)
r2 = Rational(5,4)
r1
r1+r2
r1/r2
denom(r1)
"""
Explanation: Fractions
There are three numeric types: Real, Rational, Integer:
End of explanation
"""
pi.evalf(n=50)
y = (x + pi)**2
N(y, 5)
"""
Explanation: Numerical evaluation
SymPy can compute to arbitrary precision and has predefined mathematical constants such as pi and e, as well as oo for infinity.
The evalf function, or the N method with the input argument n, evaluates an expression to n digits.
End of explanation
"""
y.subs(x, 1.5)
N(y.subs(x, 1.5))
"""
Explanation: If we want to substitute a concrete number for a variable, we can do so using the subs function:
End of explanation
"""
y.subs(x, a+pi)
"""
Explanation: But subs can also be used more generally:
End of explanation
"""
import numpy
x_vec = numpy.arange(0, 10, 0.1)
y_vec = numpy.array([N(((x + pi)**2).subs(x, xx)) for xx in x_vec])
from matplotlib.pyplot import subplots
%matplotlib inline
fig, ax = subplots()
ax.plot(x_vec, y_vec);
"""
Explanation: Sympy and Numpy can be used together:
End of explanation
"""
# the first argument is the list of variables of the function f; in this case the function is x -> f(x)
f = lambdify([x], (x + pi)**2, 'numpy')
y_vec = f(x_vec)
"""
Explanation: More efficient code is obtained with the lambdify function, which compiles a Sympy expression into a function:
End of explanation
"""
%%timeit
y_vec = numpy.array([N(((x + pi)**2).subs(x, xx)) for xx in x_vec])
%%timeit
y_vec = f(x_vec)
"""
Explanation: The difference in execution speed:
End of explanation
"""
string = '1/(x-1) + 1/(x+1) + x + 1'
izraz = sympify(string)
izraz
"""
Explanation: Here we could also have used theano or uFuncify.
Converting a string into a Sympy expression:
End of explanation
"""
x = Symbol('x')
def factorit(n):
return display(Eq(x ** n - 1, factor(x ** n - 1)))
"""
Explanation: An interactive example:
End of explanation
"""
factorit(18)
interact(factorit,n=(2,20));
interact(factorit,n=(1,20,2));
interact(factorit,n=widgets.widget_int.IntSlider(min=2,max=20,step=1,value=2));
"""
Explanation: Eq creates mathematical equalities, i.e. equations.
End of explanation
"""
together(izraz)
cancel(together(izraz))
(x+1)*(x+2)*(x+3)
expand((x+1)*(x+2)*(x+3))
"""
Explanation: Algebraic manipulation
End of explanation
"""
sin(a+b)
expand(sin(a+b), trig=True)
simplify(sin(a)**2 + cos(a)**2)
simplify(cos(x)/sin(x))
f1 = 1/((a+1)*(a+2))
apart(f1)
f2 = 1/(a+2) + 1/(a+3)
together(f2)
"""
Explanation: expand accepts additional arguments, e.g. trig=True:
End of explanation
"""
y
diff(y**2, x)
"""
Explanation: Calculus
Differentiation
End of explanation
"""
diff(y**2, x, x)
diff(y**2, x, 2)
from sympy.abc import x,y,z
# or e.g. symbols('x:z')
f = sin(x*y) + cos(y*z)
"""
Explanation: Higher-order derivatives:
End of explanation
"""
diff(f, x, 1, y, 2)
def deriv(f):
display(diff(f,x))
interact_manual(deriv, f='x');
"""
Explanation: We want to compute $$\frac{\partial^3f}{\partial x \partial y^2}$$
End of explanation
"""
f
integrate(f, x)
"""
Explanation: Integration
End of explanation
"""
integrate(f, (x, -1, 1))
"""
Explanation: Definite integrals:
End of explanation
"""
integrate(exp(-x**2), (x, -oo, oo))
"""
Explanation: Improper integrals:
End of explanation
"""
n = Symbol("n")
Sum(1/n**2, (n, 1, 10))
Sum(1/n**2, (n,1, 10)).evalf()
Sum(1/n**2, (n, 1, oo)).evalf()
Product(n, (n, 1, 10))
"""
Explanation: Sums and products
End of explanation
"""
limit(sin(x)/x, x, 0)
f
diff(f, x)
"""
Explanation: Limits
End of explanation
"""
h = Symbol("h")
limit((f.subs(x, x+h) - f)/h, h, 0)
limit(1/x, x, 0, dir="+")
limit(1/x, x, 0, dir="-")
"""
Explanation: $$ \frac{\partial f(x,y)}{\partial x} = \lim_{h\to 0}\frac{f(x+h,y)-f(x,y)}{h}$$
End of explanation
"""
series(exp(x), x)
"""
Explanation: (Taylor) series
End of explanation
"""
series(exp(x), x, 1)
series(exp(x), x, 1, 10)
tan(x).series(x,pi/2)
s1 = cos(x).series(x, 0, 5)
s1
s2 = sin(x).series(x, 0, 2)
s2
expand(s1 * s2)
"""
Explanation: Expansion around $x=1$:
End of explanation
"""
expand(s1.removeO() * s2.removeO())
"""
Explanation: With the removeO method we can get rid of the $\mathcal{O}$ term:
End of explanation
"""
(cos(x)*sin(x)).series(x, 0, 6)
"""
Explanation: But be careful with that:
End of explanation
"""
residue(2/sin(x), x, 0)
"""
Explanation: Residues:
End of explanation
"""
m11, m12, m21, m22 = symbols("m11, m12, m21, m22")
b1, b2 = symbols("b1, b2")
A = Matrix([[m11, m12],[m21, m22]])
A
b = Matrix([[b1], [b2]])
b
A**2
A * b
def funkcija(A,f):
return display(getattr(A,f)())
interact(funkcija,A = fixed(A), f=['det','inv','adjoint','charpoly']);
"""
Explanation: Linear algebra
Matrices
End of explanation
"""
solve(x**2 - 1, x)
solve(x**4 - x**2 - 1, x)
eq = Eq(x**3 + 2*x**2 + 4*x + 8, 0)
eq
solve(eq, x)
"""
Explanation: Solving equations
End of explanation
"""
solve([x + y - 1, x - y - 1], [x,y])
solve([x + y - a, x - y - c], [x,y])
"""
Explanation: Systems of equations:
End of explanation
"""
from verzije import *
from IPython.display import HTML
HTML(print_sysinfo()+info_packages('sympy,matplotlib,IPython,numpy, ipywidgets'))
"""
Explanation: You can learn more about interactive widgets from the examples found here.
End of explanation
"""
|
antisrdy/el_nino | el_nino.ipynb | mit |
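# Setup assumed by the cells below; the notebook's original import cell is not
# included in this excerpt, so these aliases are inferred from later usage.
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
%matplotlib inline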
def get_mask(X, coords):
    # coords are ordered [lat_bottom, lat_top, lon_left, lon_right], matching the region boxes below.
    return (X.lat >= coords[0]) & (X.lat <= coords[1]) & (X.lon >= coords[2]) & (X.lon <= coords[3])
def get_pacific_data(X):
    # Five rectangles to cover the Pacific from top to bottom; the mask applied is their union.
    mask1 = (X.lat <= 60) & (X.lat >= 51) & (X.lon >= 140) & (X.lon <= 360 - 165)
    mask2 = (X.lat <= 50) & (X.lat >= 26) & (X.lon >= 140) & (X.lon <= 360 - 125)
    mask3 = (X.lat <= 25) & (X.lat >= 12) & (X.lon >= 140) & (X.lon <= 360 - 110)
    mask4 = (X.lat <= 11) & (X.lat >= -6) & (X.lon >= 140) & (X.lon <= 360 - 90)
    mask5 = (X.lat <= -7) & (X.lat >= -70) & (X.lon >= 152) & (X.lon <= 360 - 80)
    res = X.copy()
    res = res.where(mask1 | mask2 | mask3 | mask4 | mask5, drop=True)
    return res
"""
Explanation: Restrict to Pacific data: take a square
Extremal points:
- North: 60, 140 ; 60, 360-107
- West: -70, 140 ; -70, 360-107
End of explanation
"""
X_ds = xr.open_dataset('el_nino_X_public_train.nc')
y_array =np.load('el_nino_y_public_train.npy')
def get_area_mean(tas, lat_bottom, lat_top, lon_left, lon_right):
"""The array of mean temperatures in a region at all time points."""
return tas.loc[:, lat_bottom:lat_top, lon_left:lon_right].mean(dim=('lat','lon'))
def get_mean(tas, coords):
"""The array of mean temperatures in the El Nino 3.4 region at all time points."""
return get_area_mean(tas, coords[0], coords[1], coords[2], coords[3])
en = [-5, 5, 190, 240]
up = [25, 35, 360 - 170, 360 - 120]
left = [-5, 5, 120, 170]
bottom = [-35, -25, 360 - 170, 360 - 120]
right = [-5, 5, 260, 310]
enso = get_mean(X_ds['tas'], en)
masks_coords = [up, left, bottom, right]
rectangle=[-15, 15, 165, 265]
X_pacific = X_ds['tas'].loc[:, rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
X_pacific
el_nino_lats = [en_lat_bottom, en_lat_top, en_lat_top, en_lat_bottom]
el_nino_lons = [en_lon_right, en_lon_right, en_lon_left, en_lon_left]
from matplotlib.patches import Polygon
def plot_map(X_ds, time_index):
def draw_screen_poly(lats, lons, m):
x, y = m(lons, lats)
xy = list(zip(x, y))
poly = Polygon(xy, edgecolor='black', fill=False)
plt.gca().add_patch(poly)
lons, lats = np.meshgrid(X_ds['lon'], X_ds['lat'])
fig = plt.figure()
ax = fig.add_axes([0.05, 0.05, 0.9,0.9])
map = Basemap(llcrnrlon=0, llcrnrlat=-89, urcrnrlon=360, urcrnrlat=89, projection='mill')
# draw coastlines, country boundaries, fill continents.
map.drawcoastlines(linewidth=0.25)
#map.drawcountries(linewidth=0.25)
#map.fillcontinents(color='coral',lake_color='aqua')
# draw the edge of the map projection region (the projection limb)
#map.drawmapboundary(fill_color='aqua')
im = map.pcolormesh(
lons, lats, X_ds[time_index] - 273.15, shading='flat', cmap=plt.cm.jet, latlon=True)
cb = map.colorbar(im,"bottom", size="5%", pad="2%")
draw_screen_poly(el_nino_lats, el_nino_lons, map)
time_str = str(pd.to_datetime(str(X_ds['time'].values[time_index])))[:7]
ax.set_title("Temperature map " + time_str)
#plt.savefig("test_plot.pdf")
plt.show()
ul_enso = X_ds['tas'].loc[:, 0:5, 360-170:197.5].mean(dim=('lat', 'lon'))
ur_enso = X_ds['tas'].loc[:, 0:5, 197.5:360-120].mean(dim=('lat', 'lon'))
bl_enso = X_ds['tas'].loc[:, -5:0, 360-170:197.5].mean(dim=('lat', 'lon'))
br_enso = X_ds['tas'].loc[:, -5:0, 197.5:360-120].mean(dim=('lat', 'lon'))
np.corrcoef(enso,ul_enso)
plt.plot(enso, ul_enso)
plt.plot(range(295,305), range(295,305))
np.corrcoef(enso, ur_enso)
plt.plot(enso, ur_enso)
plt.plot(range(295,305), range(295,305))
np.corrcoef(enso, bl_enso)
plt.plot(enso, bl_enso)
plt.plot(range(295,305), range(295,305))
np.corrcoef(enso, br_enso)
plt.plot(enso, br_enso)
plt.plot(range(295,305), range(295,305))
mask = (get_mask(X_ds, en) | get_mask(X_ds, up) | get_mask(X_ds, left) | get_mask(X_ds, bottom) | get_mask(X_ds, right))
X_pacific = X_ds.where(mask, drop=True)
X_pacific = X_ds.where(get_mask(X_ds, en), drop=True)
up_mean = get_mean(X_ds['tas'], up)
left_mean = get_mean(X_ds['tas'], left)
bottom_mean = get_mean(X_ds['tas'], bottom)
right_mean = get_mean(X_ds['tas'], right)
def plotMonthCorrelation(who, mos):
who_grouped = who.groupby('time.month')
for emonth, egrouped in enso.groupby('time.month'):
for month, grouped in who_grouped:
if emonth==month and month in mos: plt.plot(egrouped, grouped)
plotMonthCorrelation(right_mean, [7,8,9])
plt.plot(range(295,305), range(295,305))
np.corrcoef(enso, bottom_mean)
np.corrcoef(enso, right_mean)
right_mean.plot()
enso.plot()
enso.plot()
"""
Explanation: Load data
End of explanation
"""
X_pacific = get_pacific_data(X_ds)
X_pacific
X_pacific.lat
X_pacific.lon
t = 500
plot_map(X_pacific.fillna(273.15), t)
X_pacific['tas'].loc[:,60,:]
"""
Explanation: Pacific data
Loading
End of explanation
"""
X_pacific['tas'].loc[:, -5, 192.5].plot()
X_pacific['tas'].loc[:, 5, 192.5].plot()
np.corrcoef(X_pacific['tas'].loc[:, -5, 192.5].values,
X_pacific['tas'].loc[:, 5, 192.5].values)
X_pacific['tas'].loc[:, -5, 242.5].plot()
X_pacific['tas'].loc[:, 5, 242.5].plot()
np.corrcoef(X_pacific['tas'].loc[:, -5, 242.5].values,
X_pacific['tas'].loc[:, 5, 242.5].values)
"""
Explanation: Description
El nino zone
End of explanation
"""
np.corrcoef(X_pacific['tas'].loc[:, -5, 242.5].values,
X_pacific['tas'].loc[:, 0, 212.5].values)
np.corrcoef(X_pacific['tas'].loc[:, 5, 242.5].values,
X_pacific['tas'].loc[:, 0, 212.5].values)
np.corrcoef(X_pacific['tas'].loc[:, 5, 192.5].values,
X_pacific['tas'].loc[:, 0, 212.5].values)
np.corrcoef(X_pacific['tas'].loc[:, -5, 192.5].values,
X_pacific['tas'].loc[:, 0, 212.5].values)
#X_pacific['tas'].loc[:, 5, 192.5].plot()
np.corrcoef(X_pacific['tas'].loc[:, 50, 192.5].values,
X_pacific['tas'].loc[:, -45, 192.5].values)
start=np.datetime64('1752-02')
end=np.datetime64('1753-12')
X_pacific['tas'].loc[start:end, 0, 212.5].plot()
X_pacific['tas'].loc[start:end, 50, 192.5].plot()
X_pacific['tas'].loc[start:end, -45, 192.5].plot()
xcoords = [np.datetime64('1752-09-15'),
np.datetime64('1753-02-15'),
np.datetime64('1753-08-15')]
for xc in xcoords:
plt.axvline(x=xc)
start=np.datetime64('1750-02')
end=np.datetime64('1760-12')
X_pacific['tas'].loc[start:end, 0, 212.5].plot()
class FeatureExtractor(object):
def __init__(self):
pass
def transform(self, X_ds):
"""Compute the El Nino mean at time t - (12 - X_ds.n_lookahead),
        corresponding to the month to be predicted."""
# This is the range for which features should be provided. Strip
# the burn-in from the beginning.
valid_range = np.arange(X_ds.n_burn_in, len(X_ds['time']))
        enso = get_mean(X_ds['tas'], en)  # mean temperature in the El Nino 3.4 box defined above
# Roll the input series back so it corresponds to the month to be
# predicted
enso_rolled = np.roll(enso, 12 - X_ds.n_lookahead)
# Strip burn in.
enso_valid = enso_rolled[valid_range]
# Reshape into a matrix of one column
X_array = enso_valid.reshape((-1, 1))
return X_array
en_lat_bottom = -5
en_lat_top = 5
en_lon_left = 360 - 170
en_lon_right = 360 - 120
class Physical_point:
def __init__(self, lat, lon):
self.lat = lat
self.lon = lon
pacific_points = [#Physical_point(60,142.5),
#Physical_point(55,152.5),
Physical_point(50,197.5),
Physical_point(50,142.5),
Physical_point(50,237.5),
Physical_point(35,197.5),
Physical_point(25,142.5),
Physical_point(25,237.5),
Physical_point(10,142.5)]#,
#Physical_point(10, 272.5)]
for i in range(-5, -45, -5):
for j in [152.5, 277.5]: # 217.5,
pacific_points.append(Physical_point(i,j))
en_points = [Physical_point(5,192.5),
Physical_point(5, 242.5),
#Physical_point(0, 217.5),
Physical_point(-5, 192.5),
Physical_point(-5, 242.5)]
class Node:
def __init__(self, en_series, pacific_series):
self.en_series = en_series.values
self.pacific_series = pacific_series.values
self.C = []
self.meanC = 0
self.stdC = 0
self.maxC = 0
self.minC = 0
self.W = []
self.maxW = 0
self.minW = 0
def updateMetrics(self, t, lag):
en_current = self.en_series[t-lag+1:t+1]
pp_current = self.pacific_series[t-lag+1:t+1]
C = np.corrcoef(en_current, pp_current)[0,1]
self.C.append(C)
self.meanC = np.mean(self.C)
self.stdC = np.std(self.C)
if C > self.maxC: self.maxC = C
elif C < self.minC: self.minC = C
W = (self.maxC - self.meanC)/self.stdC
if W > self.maxW: self.maxW = W
elif W < self.minW: self.minW = W
return True
def update(self, t, lag):
self.updateMetrics(t, lag)
return True
class Graph:
def __init__(self, X_ds):
"""
Init graph
Every el nino point vs all pacific points
"""
self.nodes = []
self.n_burn_in = X_ds.n_burn_in
for en_point in en_points:
en_lat = en_point.lat
en_lon = en_point.lon
for pacific_point in pacific_points:
pp_lat = pacific_point.lat
pp_lon = pacific_point.lon
self.nodes.append(Node(X_ds['tas'].loc[:, en_lat, en_lon],
X_ds['tas'].loc[:, pp_lat, pp_lon]))
print len(self.nodes)
def burn_in_update(self, t, lag):
for node in self.nodes:
node.update(t, lag)
def update(self, t, lag):
array = []
for node in self.nodes:
node.update(t, lag)
array.append(node.maxC)
array.append(node.minC)
array.append(node.maxW)
array.append(node.minW)
array.append(node.stdC)
array.append(node.meanC)
return array
class FeatureExtractor(object):
def __init__(self, X_ds):
self.graph = Graph(X_ds)
def transform(self, X_ds, lag):
# Burn in update
for t in range(X_ds.n_burn_in):
if t%11 == 0 and t > 0: self.graph.burn_in_update(t, lag)
# Prevision
X_array = []
for t in range(X_ds.n_burn_in, len(X_ds['time'])):
X_array.append(self.graph.update(t, lag))
return np.array(X_array)
fE = FeatureExtractor(X_ds)
X_array = fE.transform(X_ds, 12)
X_array.shape
import xgboost as xgb
xgb.DMatrix(X_array)
import xgboost as xgb
from sklearn.base import BaseEstimator
class Regressor(BaseEstimator):
def __init__(self):
self.clf = xgb.sklearn.XGBRegressor(max_depth=5,
learning_rate=0.1,
n_estimators=100,
silent=True,
objective='reg:linear',
nthread=1,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
seed=0,
missing=None)
def fit(self, X, y):
self.clf.fit(X,y)
def predict(self, X):
return self.clf.predict(X)
reg = Regressor()
pd.DatetimeIndex(X_ds['tas'][0].time.values)
int(str(X_ds['tas'][0].time.values).split("-")[1])
int(01)
[1, 1] + [3,4]
from keras.models import Sequential
"""
Explanation: Temperature looks correlated to the longitude in the el nino zone; surprising...
End of explanation
"""
|
NelisW/ComputationalRadiometry
|
03-Introduction-to-Radiometry.ipynb
|
mpl-2.0
|
from IPython.display import display
from IPython.display import Image
from IPython.display import HTML
"""
Explanation: 3 Brief Introduction to Radiometry
This notebook forms part of a series on computational optical radiometry
The date of this document and module versions used in this document are given at the end of the file.
Feedback is appreciated: neliswillers at gmail dot com.
Overview
End of explanation
"""
display(Image(filename='images/PM236.jpg'))
"""
Explanation: The pyradi toolkit is a Python toolkit to perform optical and infrared computational radiometry (flux flow) calculations.
Radiometry is the measurement and calculation of electromagnetic flux transfer for systems operating in the spectral region ranging from ultraviolet to microwaves. Indeed, these principles can be applied to electromagnetic radiation of any wavelength. This book only considers ray-based radiometry for incoherent radiation fields.
The briefly summarised information in this notebook is taken from my book, see the book for more details.
End of explanation
"""
display(Image(filename='images/radiometry03.png'))
"""
Explanation: Electromagnetic radiation can be modeled as a number of different phenomena: rays, electromagnetic waves, wavefronts, or particles. All of these models are mathematically related. The appropriate model to use depends on the task at hand. Either the electromagnetic wave model(developed by Maxwell) or the particle model (developed by Einstein) are used when most appropriate. The part of the electromagnetic spectrum normally considered in optical radiometry is as follows:
End of explanation
"""
display(Image(filename='images/radiometry01.png'))
display(Image(filename='images/radiometry02.png'))
"""
Explanation: The photon is a massless elementary particle and acts as the energy carrier for the electromagnetic wave.
Photon particles have discrete energy quanta proportional to the frequency of the electromagnetic energy, $Q = h\nu = hc/\lambda$, where $h$ is Planck's constant.
Definitions
The following figure (expanded from Pinson) and table defines the key radiometry units. The difference operator '$d$' is used to denote 'a small quantity of ...'. This 'small quantity' of one variable is almost always related to a 'small quantity' of another variable in some physical dependency. For example, irradiance is defined as $E=d\Phi/dA$, which means that a small amount of flux $d\Phi$ impinges on a small area $dA$, resulting in an irradiance of $E$. 'Small' is defined as the extent or domain over which the quantity, or any of its dependent quantities, does not vary significantly. Because any finite-sized quantity varies over a finite-sized domain, the $d$ operation is only valid over an infinitely small domain $dA=\lim_{\Delta A \to 0}\Delta A$. The difference operator, written in the form of a differential such as $E=d\Phi/dA$, is not primarily meant to mean differentiation in the mathematical sense. Rather, it is used to indicate something that can be integrated (or summed).
In practice, it is impossible to consider infinitely many, infinitely small domains. Following the reductionist approach, any real system can, however, be assembled as the sum of a set of these small domains, by integration over the physical domain as in $A=\int dA$. Hence, the 'small-quantity' approach proves very useful to describe and understand the problem, whereas the real-world solution can be obtained as the sum of a set of such small quantities. In almost all of the cases in this notebook, it is implied that such 'small-quantity' domains will be integrated (or summed) over the (larger) problem domain.
Photon rates are measured in quanta per second.
The 'second' is an SI unit, whereas quanta is a unitless count: the number of photons. Photon rate therefore has units of [1/s] or [s$^{-1}$]. This form tends to lose track of the fact that the number of quanta per second is described. The notebook may occasionally contain units of the form [q/s] to emphasize the photon count. In this case, the 'q' is not a formal unit, it is merely a reminder of 'counts.' In dimensional analysis the 'q' is handled the same as any other unit.
Radiometric quantities can be defined in terms of three different but related units: radiant power (watts), photon rates (quanta per second), or photometric luminosity (lumen). Photometry is radiometry applied to human visual perception.
The conversion from radiometric to photometric quantities is
covered in more detail in my book. It is important to realize
that the underlying concepts are the same, irrespective of the nature of
the quantity. All of the derivations and examples presented in this book are equally valid for radiant, photon, or photometric quantities.
Flux is the amount of optical power, a photon rate, or photometric luminous flux, flowing between two surfaces. There is always a source area and a receiving area, with the flux flowing between them. All quantities of flux are denoted by the symbol $\Phi$. The units are [W], [q/s], or [lm], depending on the nature of the quantity.
Irradiance (areance) is the areal density of flux on the receiving surface area. The flux flows inward onto the surface with no regard to incoming angular density. All quantities of irradiance are denoted by the symbol $E$. The units are [W/m$^2$], [q/(s$\cdot$m$^2$)], or [lm/m$^2$], depending on the nature of the quantity.
Exitance (areance)
is the areal density of flux on the source surface
area. The flux flows outward from the surface with no regard to angular density. The exitance leaving a surface
can be due to reflected light, transmitted light, emitted light, or any combination thereof. All quantities of exitance are denoted by the
symbol $M$. The units are [W/m$^2$], [q/(s$\cdot$m$^2$)], or [lm/m$^2$], depending on the
nature of the quantity.
Intensity (pointance) is the density of flux over solid angle. The flux flows outward from the source with no regard for surface area. Intensity is denoted by the symbol $I$. The human perception of a point source (e.g., a star at long range) 'brightness' is an intensity measurement. The units are [W/sr], [q/(s$\cdot$sr)], or [lm/sr], depending on the nature of the quantity.
Radiance (sterance) is the density of flux per unit source surface area and unit solid angle.
Radiance is a property of the electromagnetic field irrespective of spatial location (in a lossless medium). For a radiating surface, the radiance may comprise transmitted light, reflected light, emitted light, or any combination thereof. The radiance in a field created by a Lambertian source is conserved: the radiance is constant anywhere in space, also on the receiving surface. All radiance quantities are denoted by the symbol $L$. The human perception of 'brightness' of a large surface can be likened to a radiance experience (beware of the nonlinear response in the eye, however). The units are
[W/(m$^2$ $\cdot$sr)], [q/(s$\cdot$m$^2$ $\cdot$sr)], or [lm/(m$^2$ $\cdot$sr)], depending on the nature of the
quantity.
End of explanation
"""
display(Image(filename='images/radiometry04.png'))
"""
Explanation: Spectral quantities
See notebook 4 in this series, Introduction to computational radiometry with pyradi, for a detailed description of spectral quantities.
Three spectral domains are commonly used: wavelength $\lambda$ in [m], frequency $\nu$ in [Hz], and wavenumber $\tilde{\nu}$ in [cm$^{-1}$] (the number of waves that will fit into a 1-cm length).
Spectral quantities indicate an amount of the quantity within a small spectral width $d\lambda$ around the value of $\lambda$: it is a spectral density. Spectral density quantity symbols are subscripted with a $\lambda$ or $\nu$, i.e., $L_\lambda$ or $L_\nu$. The dimensional units of a spectral density quantity are indicated as [$\mu$m$^{-1}$] or [(cm$^{-1})^{-1}$], i.e., [W/(m$^2$ $\cdot$sr$\cdot$ $\mu$m)].
The relationship between the wavelength and wavenumber spectral domains is $\tilde{\nu}=10^4/\lambda$, where $\lambda$ is in units of $\mu$m. The conversion of a spectral density quantity such as [W/(m$^2$ $\cdot$sr$\cdot$cm$^{-1}$)] requires the derivative $d{\tilde{\nu}}=-\frac{10^4}{\lambda^2}d\lambda=-\frac{\tilde{\nu}^2}{10^4}d\lambda$.
The derivative relationship converts between the spectral widths, and hence the spectral densities, in the two respective domains.
The conversion from a wavelength spectral density quantity to a wavenumber spectral density quantity is
$d{}L_{\tilde{\nu}}=d{}L_\lambda \lambda^2/10^4=d{} L_\lambda 10^4/\tilde{\nu}^2$.
Spectral quantities denote the amount in a small spectral width $d\lambda$ around a wavelength $\lambda$. It follows that the total quantity over a spectral range can be determined by integration (summation) over the spectral range of interest:
$$
L=\int_{\lambda_1}^{\lambda_2}L_\lambda d\lambda.
$$
The above integral satisfies the requirements of dimensional analysis (see my book) because the units of $L_\lambda$ are [W/(m$^2$ $\cdot$sr$\cdot$ $\mu$m)], whereas $d\lambda$ has the units of [$\mu$m], and $L$ has units of [W/(m$^2$ $\cdot$sr)].
Solid Angle
The geometric solid angle $\omega$ of any arbitrary surface $P$ from the reference point is given by
$$
\omega=\int\!\!\!\!\int^{P} \frac{d^2 P \cos\theta_1}{R^2},
$$
where $d^2 P \cos\theta_1$ is the projected surface area of the surface $P$ in the direction of the reference point, and $R$ is the distance from $d^2 P$ to the reference point. The integral is independent of the viewing direction $(\theta_0, \alpha_0)$ from the reference point. Hence, a given area at a given distance will always have the same geometric solid angle irrespective of the direction of the area.
The geometric solid angle of a cone is $\omega=4\pi\sin^2\left(\frac{\Theta}{2}\right)$, where $\Theta$ is the cone half-apex angle.
The projected solid angle $\Omega$ of any arbitrary surface $P$ from the reference area $dA_0$ is given by
$$
\Omega=\int\!\!\!\!\int^{P} \frac{d^2 P \cos\theta_0 \cos\theta_1}{R^2},
$$
where $d^2 P \cos\theta_1$ is the projected surface area of the surface $P$ in the direction of the reference area, and $R$ is the distance from $d^2 P$ to the reference area. The integral depends on the viewing direction $(\theta_0, \alpha_0)$ from the reference area, by the projected area ($dA_0\cos\theta_0$) of $dA_0$ in the direction of $d^2 P$.
Hence, a given area at a given distance will always have a different projected solid angle in different directions.
The projected solid angle of a cone is $\Omega=\pi\sin^2\left(\Theta\right)$, where $\Theta$ is the cone half-apex angle.
End of explanation
"""
display(Image(filename='images/radiometry05.png'))
"""
Explanation: Lambertian radiators
A Lambertian source is, by definition, one whose radiance is completely independent of viewing angle. Many (but not all) rough and natural surfaces produce radiation whose radiance is approximately independent of the angle of observation. These surfaces generally have a rough texture at microscopic scales. Planck-law blackbody radiators are also Lambertian sources (see my book). Any Lambertian radiator is completely described by its scalar radiance magnitude only, with no angular dependence in radiance.
The relationship between the exitance and radiance for such a Lambertian surface can be easily derived. If the flux radiated from a Lambertian surface $\Phi$ [W] is known, it is a simple matter to calculate the exitance $M=\Phi/A$ [W/m$^2$], where $A$ is the radiating surface area. The exitance of a Lambertian radiator is related to radiance by the projected solid angle of $\pi$ sr, not the geometric solid angle of $2\pi$ sr as one might expect. The details are given in my book.
Conservation of radiance
Radiance is conserved for flux from a Lambertian surface propagation through a lossless optical
medium. Consider the construction below: two elemental areas $dA_0$ and $dA_1$ are separated by a distance $R_{01}$, with the angles between the normal vector of each surface and the line of sight given by $\theta_0$ and $\theta_1$. A total flux of $d^2\Phi$ is flowing through both the surfaces. It can be shown (see my book) that for a Lambertian radiator the radiance in an arbitrary $dA_n$ is the same as the radiance in $dA_1$.
As light propagates through mediums with different refractive indices $n$ such as air, water, glass, etc., the entity called basic radiance, defined by $L/n^2$, is invariant. It can be shown that for light propagating from a medium with refractive index $n_1$ to a medium with refractive index $n_2$, the basic radiance is conserved:
$$
\frac{L_1}{n_1^2}=\frac{L_2}{n_2^2}.
$$
End of explanation
"""
display(Image(filename='images/radiometry06.png'))
"""
Explanation: Flux transfer through lossless and lossy mediums
A lossless medium is defined as a medium with no losses between the source and the receiver, such as a complete vacuum. This implies that no absorption, scattering, or any other attenuating mechanism is present in the medium. For a lossless medium the flux that flow between both $dA_0$ and $dA_1$ is given by
$$
d^2 \Phi= \frac{L_{01}\,d A_0\,\cos\theta_0\, d A_1\,\cos\theta_1}{R_{01}^2}.
$$
If the medium has loss, the loss effect is accounted for by including a 'transmittance' factor $\tau_{01}=\Phi_1/\Phi_0=L_{10}/L_{01}$, i.e., the fraction of the flux from $A_0$ that arrives at $A_1$, then
$$
d^2 \Phi= \frac{L_{01}\,d A_0\,\cos\theta_0\, d A_1\,\cos\theta_1 \tau_{01}}{R_{01}^2}.
$$
Sources and receivers of arbitrary shape
The above equation calculates the flux flowing between two infinitely small areas. The flux flowing between two arbitrary shapes can be calculated by integrating the equation over the source surface and the receiving surface. In the general case, the radiance $L$ cannot be assumed constant over $A_0$, introducing the spatial radiance distribution $L(dA_{0})$ as a factor into the spatial integral.
Likewise, the medium transmittance between any two areas $dA_{0}$ and $dA_{1}$ varies with the spatial locations of $dA_{0}$ and $dA_{1}$ --- hence $\tau_{01}(dA_{0},dA_{1})$ should also be included in the spatial integral.
The integral can be performed over any arbitrary shape, as shown in the following figure, supporting the solution with complex geometries. Clearly matters such as obscuration and occlusion should be considered when performing this integral:
$$
\Phi=\int_{A_0}\int_{A_1}
\frac{L(dA_{0})\,dA_0\,\cos\theta_0\, dA_1\,\cos\theta_1\,\tau_{01}(dA_{0},dA_{1})}{R_{01}^2}.
$$
End of explanation
"""
display(Image(filename='images/radiometry07.png'))
"""
Explanation: Multi-spectral flux transfer
The optical power leaving a source undergoes a succession of scaling or 'spectral filtering' processes as the flux propagates through the system, as shown below. This filtering varies with wavelength.
Examples of such filters are source emissivity, atmospheric transmittance, optical filter transmittance, and detector responsivity. The multi-spectral filter approach described here is conceptually simple but fundamental to the calculation of radiometric flux.
End of explanation
"""
try:
import pyradi.ryutils as ryutils
print(ryutils.VersionInformation('matplotlib,numpy,pyradi,scipy,pandas'))
except:
print("pyradi.ryutils not found")
"""
Explanation: Extend the above flux-transfer equation for multi-spectral calculations by noting that over a spectral width $d\lambda$ the radiance is given by $L = L_\lambda d\lambda$:
$$
d^3 \Phi_\lambda=
\frac{L_{01\lambda}\,dA_0\;\cos\theta_0\,dA_1\;\cos\theta_1
\;\tau_{01}\,d\lambda}{R_{01}^2},
$$
where $d^3\Phi_\lambda$ is the total flux in [W] or [q/s] flowing in a spectral width $d\lambda$ at wavelength $\lambda$, from a radiator with radiance $L_{0\lambda}$ with units [W/(m$^2$ $\cdot$sr$\cdot$ $\mu$m)] and projected surface area $dA_0\cos\theta_0$, through a receiver with projected surface area $dA_1\cos\theta_1$ at a distance $R_{01}$, with a transmittance of $\tau_{01}$ between the two surfaces. The transmittance $\tau_{01}$ now includes all of the spectral variables in the path between the source and the receiver.
To determine the total flux flowing from elemental area $dA_0$ through $dA_1$ over a wide spectral width, divide the wide spectral band into a large number $N$ of narrow widths $\Delta\lambda$ at wavelengths $\lambda_n$ and add the flux for all of these narrow bandwidths together as follows:
$$
d^2 \Phi=
\sum_{n=0}^{N}
\left(
\frac{L_{01\lambda_n}
\,dA_{0}\,\cos\theta_0\,
\,dA_{1}\,\cos\theta_1\,
\tau_{01\lambda_n}
\Delta\lambda}{R_{01}^2}
\right).
$$
By the Riemann--Stieltjes theorem in reverse, if now $\Delta\lambda\rightarrow 0$ and $N\rightarrow\infty$, the summation becomes the integral
$$
d^2 \Phi=
\int_{\lambda_1}^{\lambda_2}
\frac{L_{01\lambda}
\,dA_{0}\,\cos\theta_0\,
\,dA_{1}\,\cos\theta_1 \,\tau_{01\lambda}d\lambda}{R_{01}^2}\ .
$$
This equation describes the total flux at all wavelengths in the spectral range $\lambda_1$ to $\lambda_2$ passing
through the system. This equation is developed further in my book.
Conclusion
The flux transfer between any two arbitrary surfaces, over any spectral band can be calculated by
$$
\Phi=
\int_{A_0}
\int_{A_1}
\int_{\lambda_1}^{\lambda_2}
\frac{L_{01\lambda}
\,dA_{0}\,\cos\theta_0\,
\,dA_{1}\,\cos\theta_1 \,\tau_{01\lambda}d\lambda}{R_{01}^2}\ .
$$
In practice these integrals are performed by finite sums of small elemental areas and spectral widths.
Any arbitrary problem can be solved using this approach. For a simple example see the flame sensor and the other pages of this notebook series.
Python and module versions, and dates
End of explanation
"""
|
AEW2015/PYNQ_PR_Overlay
|
Pynq-Z1/notebooks/Video_PR/RGB_Filter.ipynb
|
bsd-3-clause
|
from pynq.drivers.video import HDMI
from pynq import Bitstream_Part
from pynq.board import Register
from pynq import Overlay
Overlay("demo.bit").download()
"""
Explanation: Don't forget to delete the hdmi_out and hdmi_in when finished
RGB Filter Example
In this notebook, we will explore the colors that are used to create an image. Although humans are able to see a multitude of colors, our eyes are technically only capable of detecting red, green, and blue. Every other color that we see is a composition of these three primary colors. Black is the absence of any color while white is the combination of all colors.
<img src="data/AdditiveColor.png"/>
This diagram shows how colors are added together to create new colors.
This notebook will use a video filter that will allow for the addition and removal of colors from a live video feed. This will help show how colors are formed from the three primary colors: red, green, and blue.
1. Download base overlay to the board
Ensure that the camera is not connected to the board. Run the following script to provide the PYNQ with its base overlay.
End of explanation
"""
hdmi_in = HDMI('in')
hdmi_out = HDMI('out', frame_list=hdmi_in.frame_list)
hdmi_out.mode(2)
hdmi_out.start()
hdmi_in.start()
"""
Explanation: 2. Connect camera
Physically connect the camera to the HDMI-in port of the PYNQ. Run the following code to instruct the PYNQ to capture the video from the camera and to begin streaming video to your monitor (connected to the HDMI-out port).
End of explanation
"""
Bitstream_Part("rgb_p.bit").download()
"""
Explanation: 3. Program board with RGB Filter
Run the following script to download the RGB Filter to the PYNQ. This will allow us to modify the colors of the video stream.
End of explanation
"""
import ipywidgets as widgets
from ipywidgets import Button, HBox, VBox, Label
words = ['HDMI Reset']
items = [Button(description=w) for w in words]
def on_hdmi_clicked(b):
hdmi_out.stop()
hdmi_in.stop()
hdmi_out.start()
hdmi_in.start()
R0=Register(0)
R1=Register(1)
R2=Register(2)
R0.write(255)
R1.write(255)
R2.write(255)
R0_s = widgets.IntSlider(
value=255,
min=0,
max=255,
step=1,
description='Red:',
disabled=False,
continuous_update=True,
orientation='vertical',
readout=True,
readout_format='i',
slider_color='red'
)
R1_s = widgets.IntSlider(
value=255,
min=0,
max=255,
step=1,
description='Green:',
disabled=False,
continuous_update=True,
orientation='vertical',
readout=True,
readout_format='i',
slider_color='green'
)
R2_s = widgets.IntSlider(
value=255,
min=0,
max=255,
step=1,
description='Blue:',
disabled=False,
continuous_update=True,
orientation='vertical',
readout=True,
readout_format='i',
slider_color='blue'
)
def update_r0(*args):
R0.write(R0_s.value)
R0_s.observe(update_r0, 'value')
def update_r1(*args):
R1.write(R1_s.value)
R1_s.observe(update_r1, 'value')
def update_r2(*args):
R2.write(R2_s.value)
R2_s.observe(update_r2, 'value')
items[0].on_click(on_hdmi_clicked)
widgets.HBox([VBox([items[0]]),R0_s,R1_s,R2_s])
"""
Explanation: 4. Create a user interface
We will communicate with the filter using a nice user interface. Run the following code to activate that interface.
End of explanation
"""
hdmi_out.stop()
hdmi_in.stop()
del hdmi_out
del hdmi_in
"""
Explanation: 5. Exploration
Feel free to play with the sliders above. As the slider decreases in value, the color associated with that slider will be removed from the video. Likewise, increasing the slider value will add color back into the image.
Notice that when all sliders are reduced to 0 that the image is black. Now, increase the red slider. The image should only include various shades of red. Add green into the image. The video should now include shades of red and green, but also yellow! This is because yellow is the combination of red and green.
6. Clean up
When you are done playing with the RGB filter, run the following code to stop the video stream
End of explanation
"""
|
darioizzo/d-CGP
|
doc/sphinx/notebooks/An_intro_to_dCGPANNs.ipynb
|
gpl-3.0
|
# Initial import
import dcgpy
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
%matplotlib inline
"""
Explanation: Representing an Artificial Neural Network as a Cartesian Genetic Program
(a.k.a dCGPANN)
Neural networks (deep, shallow, convolutional or not) are, after all, computer programs and as such can be encoded in a chromosome and represented as a Genetic Program. A dCGPANN, coded in the class expression_ann, is exactly this: a feed forward neural network represented into a chromosome using a Cartesian Genetic Programming encoding. Derivatives with respect to weights and biases can be easily extracted, the underlying algorithm being backward automated differentiation. Unlike other dCGP expressions, higher order derivatives are not available (no gduals).
End of explanation
"""
# A limited set of kernels is available for a dCGPANN; the most common ones in the deep-learning literature are included.
nonlinearities = dcgpy.kernel_set_double(["sig", "ReLu", "tanh"])
# After defining the possible nonlinearities, we instantiate the dCGPANN
dcgpann = dcgpy.expression_ann(inputs=2, outputs=2, rows=20, cols=5,
levels_back=2, arity=[2,20,10,10,5], kernels=nonlinearities())
# By default all weights (and biases) are set to 1 (and 0). We initialize them normally distributed
dcgpann.randomise_weights(mean = 0., std = 0.1)
dcgpann.randomise_biases(mean = 0., std = 0.001)
# We then visualize the network thus encoded as a cartesian program
ax = dcgpann.visualize(show_nonlinearities=True, active_connection_alpha=0.0)
# The weights and biases can be extracted as a whole
w = dcgpann.get_weights()
b = dcgpann.get_biases()
# Or only for a specific node/input
w5_1 = dcgpann.get_weight(node_id = 5, input_id = 1)
# The resulting expression can, as usual be computed both on numerical values ...
x = [0.1,-0.3]
print("Value in", x, " is", dcgpann(x))
# ... and symbolic names (this can get real messy so we show only 100 characters of the first output)
x = ["x", "y"]
print("Value in", x, " is", dcgpann(x)[0][:150])
"""
Explanation: Instantiating and inspecting a dCGPANN
End of explanation
"""
# We define a single input single output dCGPANN.
dcgpann = dcgpy.expression_ann(inputs=1, outputs=1, rows=10, cols=5,
levels_back=2, arity=[1,10,10,10,10], kernels=nonlinearities())
# By default all weights (and biases) are set to 1 (and 0). We initialize them randomly
dcgpann.randomise_weights(mean = 0., std = 0.1)
w = dcgpann.get_weights()
b = dcgpann.get_biases()
# As with all CGP expressions, we can now mutate, producing a slightly different architecture.
# Note that mutation only affects the chromosome (i.e. the ANN encoding), not the weights or biases.
# We visualize the starting dCGPANN
f, ax = plt.subplots(1,4, figsize=(15,3))
ax = plt.subplot(1,4,1)
ax = dcgpann.visualize(show_nonlinearities=True, legend = False, axes = ax, active_connection_alpha=0.1)
_ = ax.set_title("Original")
# mutate three function genes (these will be easily visualized)
dcgpann.mutate_active_fgene(3)
ax = plt.subplot(1,4,2)
ax = dcgpann.visualize(show_nonlinearities=True, legend = False, axes = ax, active_connection_alpha=0.1)
_ = ax.set_title("Kernels mutation")
# mutate active connections (it's difficult to "see" the change, except when some nodes become inactive or active)
dcgpann.mutate_active_cgene(30)
ax = plt.subplot(1,4,3)
ax = dcgpann.visualize(show_nonlinearities=True, legend = False, axes = ax, active_connection_alpha=0.1)
_ = ax.set_title("Connections mutation")
# mutate active connections (it's difficult to "see" the change, except when some nodes become inactive or active)
dcgpann.mutate_ogene(1)
ax = plt.subplot(1,4,4)
ax = dcgpann.visualize(show_nonlinearities=True, legend = False, axes = ax, active_connection_alpha=0.1)
_ = ax.set_title("Output connection mutation")
"""
Explanation: Mutating a dCGPANN
End of explanation
"""
# We want to train the dCGPANN in a regression task. Lets create the points
sample_size = 100
points = np.linspace(-1.,1.,sample_size)
np.random.shuffle(points)
labels = ((points-0.5)**2 + np.cos(points * 2 * np.pi)) / 3.5
points = points.reshape((sample_size,1))
labels = labels.reshape((sample_size,1))
plt.plot(points,labels, '.')
_ = plt.title("function to be learned")
# Since the output is in [-1, 1] we force the output nonlinearity to be tanh
dcgpann.set_output_f("tanh")
print("Starting error:", dcgpann.loss(points,labels, "MSE"))
print("Net complexity (number of active weights):", dcgpann.n_active_weights())
print("Net complexity (number of unique active weights):", dcgpann.n_active_weights(unique=True))
print("Net complexity (number of active nodes):", len(dcgpann.get_active_nodes()))
# This will store the learning history
n_epochs = 50000
res = [0] * n_epochs
dcgpann.set_weights(w)
dcgpann.set_biases(b)
# Let's go
for i in tqdm(range(n_epochs)):
res[i] = dcgpann.sgd(points = points, labels = labels, lr = 0.1, batch_size = 32, loss = "MSE", parallel = 4, shuffle = True)
print("End MSE: ", dcgpann.loss(points,labels, "MSE"))
f, ax = plt.subplots(1,2, figsize=(15,3))
# We plot the learned function against the target
plt.subplot(1,2,1)
_ = plt.plot(points,labels, '.')
_ = plt.plot(points, [dcgpann(p) for p in points],'.')
plt.subplot(1,2,2)
# We plot the mse during learning
_ = plt.semilogy(res)
"""
Explanation: Training a dCGPANN
End of explanation
"""
|
nikbearbrown/Deep_Learning
|
NEU/Tejas_Bawaskar _DL/t-SNE.ipynb
|
mit
|
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.cross_validation import train_test_split

dataframe_all = pd.read_csv("https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv")
num_rows = dataframe_all.shape[0]
print('No. of rows:', num_rows)
dataframe_all.head()
"""
Explanation: Step 1: download the data
End of explanation
"""
# List all factors of our response variable
dataframe_all.classe.unique()
"""
Explanation: Each row represents a different person and each column is one of many physical measurements, like the position of their arm or forearm, and each person gets one of 5 labels (classes) like sitting, standing, jumping, running and jogging.
End of explanation
"""
# count the number of missing elements (NaN) in each column
counter_nan = dataframe_all.isnull().sum()
counter_without_nan = counter_nan[counter_nan==0]
print('Columns without Nan:', counter_without_nan )
# remove the columns with missing elements
dataframe_all = dataframe_all[counter_without_nan.keys()]
# remove the first 7 columns which contain no discriminative information
dataframe_all = dataframe_all.iloc[:,7:]
# the list of columns (the last column is the class label)
columns = dataframe_all.columns
print (columns)
"""
Explanation: Step 2: remove useless data
End of explanation
"""
# get x and convert it to numpy array
x = dataframe_all.iloc[:,:-1].values
standard_scaler = StandardScaler()
x_std = standard_scaler.fit_transform(x)
"""
Explanation: Step 3: get features (x) and scale the features
End of explanation
"""
# get class label data
y = dataframe_all.iloc[:,-1].values
# encode the class label
class_labels = np.unique(y)
label_encoder = LabelEncoder()
y = label_encoder.fit_transform(y)
"""
Explanation: Step 4: get class labels y and then encode it into number
End of explanation
"""
test_percentage = 0.3
x_train, x_test, y_train, y_test = train_test_split(x_std, y, test_size = test_percentage, random_state = 0)
"""
Explanation: Step 5: split the data into training set and test set
End of explanation
"""
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
x_test_2d = tsne.fit_transform(x_test)
"""
Explanation: t-distributed Stochastic Neighbor Embedding (t-SNE) visualization
End of explanation
"""
markers=('s', 'd', 'o', '^', 'v')
color_map = {0:'red', 1:'blue', 2:'lightgreen', 3:'purple', 4:'cyan'}
plt.figure(figsize=(10,10))
for idx, cl in enumerate(np.unique(y_test)):
plt.scatter(x=x_test_2d[y_test==cl,0], y=x_test_2d[y_test==cl,1], c=color_map[idx], marker=markers[idx], label=cl)
plt.xlabel('X in t-SNE')
plt.ylabel('Y in t-SNE')
plt.legend(loc='upper right')
plt.title('t-SNE visualization of test data')
plt.show()
"""
Explanation: Scatter plot the sample points among 5 classes
End of explanation
"""
|
awhite40/pymks
|
notebooks/stress_homogenization_2D.ipynb
|
mit
|
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Effective Stiffness
Introduction
This example uses the MKSHomogenizationModel to create a homogenization linkage for the effective stiffness. This example starts with a brief background of the homogenization theory on the components of the effective elastic stiffness tensor for a composite material. Then the example generates random microstructures and their average stress values that will be used to show how to calibrate and use our model. We will also show how to use tools from sklearn to optimize fit parameters for the MKSHomogenizationModel. Lastly, the data is used to evaluate the MKSHomogenizationModel for effective stiffness values for a new set of microstructures.
Linear Elasticity and Effective Elastic Modulus
For this example we are looking to create a homogenization linkage that predicts the effective isotropic stiffness components for two-phase microstructures. The specific stiffness component we are looking to predict in this example is $C_{xxxx}$, which is easily accessed by applying a uniaxial macroscale strain tensor (the only non-zero component is $\varepsilon_{xx}$).
$$ u(L, y) = u(0, y) + L\bar{\varepsilon}_{xx}$$
$$ u(0, L) = u(0, 0) = 0 $$
$$ u(x, 0) = u(x, L) $$
More details about these boundary conditions can be found in [1]. Using these boundary conditions, $C_{xxxx}$ can be estimated by calculating the ratio of the averaged stress over the applied averaged strain.
$$ C_{xxxx}^* \cong \bar{\sigma}_{xx} / \bar{\varepsilon}_{xx}$$
In this example, $C_{xxxx}$ will be estimated for 6 different types of microstructures using the MKSHomogenizationModel from pymks, which also provides a method to compute $\bar{\sigma}_{xx}$ for a new microstructure with an applied strain of $\bar{\varepsilon}_{xx}$.
End of explanation
"""
from pymks.datasets import make_elastic_stress_random
sample_size = 200
grain_size = [(15, 2), (2, 15), (7, 7), (8, 3), (3, 9), (2, 2)]
n_samples = [sample_size] * 6
elastic_modulus = (410, 200)
poissons_ratio = (0.28, 0.3)
macro_strain = 0.001
size = (21, 21)
X, y = make_elastic_stress_random(n_samples=n_samples, size=size, grain_size=grain_size,
elastic_modulus=elastic_modulus, poissons_ratio=poissons_ratio,
macro_strain=macro_strain, seed=0)
"""
Explanation: Data Generation
A set of periodic microstructures and their volume averaged elastic stress values $\bar{\sigma}_{xx}$ can be generated by importing the make_elastic_stress_random function from pymks.datasets. This function has several arguments. n_samples is the number of samples that will be generated, size specifies the dimensions of the microstructures, grain_size controls the effective microstructure feature size, elastic_modulus and poissons_ratio are used to indicate the material property for each of the
phases, macro_strain is the value of the applied uniaxial strain, and seed can be used to change the random number generator seed.
Let's go ahead and create 6 different types of microstructures each with 200 samples with dimensions 21 x 21. Each of the 6 samples will have a different microstructure feature size. The function will return and the microstructures and their associated volume averaged stress values.
End of explanation
"""
print(X.shape)
print(y.shape)
"""
Explanation: The array X contains the microstructure information and has the dimensions
of (n_samples, Nx, Ny). The array y contains the average stress value for
each of the microstructures and has dimensions of (n_samples,).
End of explanation
"""
from pymks.tools import draw_microstructures
X_examples = X[::sample_size]
draw_microstructures((X_examples[:3]))
draw_microstructures((X_examples[3:]))
"""
Explanation: Lets take a look at the 6 types the microstructures to get an idea of what they
look like. We can do this by importing draw_microstructures.
End of explanation
"""
print('Stress Values'), (y[::200])
"""
Explanation: In this dataset 4 of the 6 microstructure types have grains that are elongated in either
the x or y directions. The remaining 2 types of samples have equiaxed grains with
different average sizes.
Let's look at the stress values for each of the microstructures shown above.
End of explanation
"""
from pymks import MKSHomogenizationModel
from pymks import PrimitiveBasis
prim_basis = PrimitiveBasis(n_states=2, domain=[0, 1])
model = MKSHomogenizationModel(basis=prim_basis,
correlations=[(0, 0), (1, 1), (0, 1)])
"""
Explanation: Now that we have a dataset to work with, we can look at how to use the MKSHomogenizationModel to predict stress values for new microstructures.
MKSHomogenizationModel Work Flow
The default instance of the MKSHomogenizationModel takes in a dataset and
- calculates the 2-point statistics
- performs dimensionality reduction using Singular Value Decomposition (SVD)
- and fits a polynomial regression model to the low-dimensional representation.
This work flow has been shown to accurately predict effective properties in several examples [2][3], and requires that we specify the number of components used in dimensionality reduction and the order of the polynomial we will be using for the polynomial regression. In this example we will show how we can use tools from sklearn to try and optimize our selection for these two parameters.
Modeling with MKSHomogenizationModel
In order to make an instance of the MKSHomogenizationModel, we need to pass an instance of a basis (used to compute the 2-point statistics). For this particular example, there are only 2 discrete phases, so we will use the PrimitiveBasis from pymks. We only have two phases denoted by 0 and 1, therefore we have two local states and our domain is 0 to 1.
Let's make an instance of the MKSHomogenizationModel.
End of explanation
"""
print('Default Number of Components'), (model.n_components)
print('Default Polynomail Order'), (model.degree)
"""
Explanation: Let's take a look at the default values for the number of components and the order of the polynomial.
End of explanation
"""
model.n_components = 40
model.fit(X, y, periodic_axes=[0, 1])
"""
Explanation: These default parameters may not be the best model for a given problem; we will now show one method that can be used to optimize them.
Optimizing the Number of Components and Polynomial Order
To start with, we can look at how the variance changes as a function of the number of components.
In general for SVD as well as PCA, the amount of variance captured in each component decreases
as the component number increases.
This means that as the number of components used in the dimensionality reduction increases, the percentage of the variance will asymptotically approach 100%. Let's see if this is true for our dataset.
In order to do this we will change the number of components to 40 and then
fit the data we have using the fit function. This function performs the dimensionality reduction and
also fits the regression model. Because our microstructures are periodic, we need to
use the periodic_axes argument when we fit the data.
End of explanation
"""
from pymks.tools import draw_component_variance
draw_component_variance(model.dimension_reducer.explained_variance_ratio_)
"""
Explanation: Now look at how the cumulative variance changes as a function of the number of components using draw_component_variance
from pymks.tools.
End of explanation
"""
from sklearn.cross_validation import train_test_split
flat_shape = (X.shape[0],) + (np.prod(X.shape[1:]),)
X_train, X_test, y_train, y_test = train_test_split(X.reshape(flat_shape), y,
test_size=0.2, random_state=3)
print(X_train.shape)
print(X_test.shape)
"""
Explanation: Roughly 90 percent of the variance is captured with the first 5 components. This means our model may only need a few components to predict the average stress.
Next we need to optimize the number of components and the polynomial order. To do this we are going to split the data into test and training sets. This can be done using the train_test_split function from sklearn.
End of explanation
"""
from sklearn.grid_search import GridSearchCV
params_to_tune = {'degree': np.arange(1, 4), 'n_components': np.arange(1, 8)}
fit_params = {'size': X[0].shape, 'periodic_axes': [0, 1]}
gs = GridSearchCV(model, params_to_tune, cv=12, n_jobs=6, fit_params=fit_params).fit(X_train, y_train)
"""
Explanation: We will use cross validation with the training data to fit a number
of models, each with a different number
of components and a different polynomial order.
Then we will use the testing data to verify the best model.
This can be done using GridSearchCV
from sklearn.
We will pass a dictionary params_to_tune with the range of
polynomial order degree and components n_components we want to try.
A dictionary fit_params can be used to pass the periodic_axes variable to
calculate periodic 2-point statistics. The argument cv can be used to specify
the number of folds used in cross validation and n_jobs can be used to specify
the number of jobs that are ran in parallel.
Let's vary n_components from 1 to 7 and degree from 1 to 3.
End of explanation
"""
from pymks.tools import draw_gridscores_matrix
draw_gridscores_matrix(gs, ['n_components', 'degree'], score_label='R-Squared',
param_labels=['Number of Components', 'Order of Polynomial'])
"""
Explanation: The default score method for the MKSHomogenizationModel is the R-squared value. Let's look at how the mean R-squared values and their
standard deviations change, as we varied the number of n_components and degree, using
draw_gridscores_matrix from pymks.tools.
End of explanation
"""
print('Order of Polynomial'), (gs.best_estimator_.degree)
print('Number of Components'), (gs.best_estimator_.n_components)
print('R-squared Value'), (gs.score(X_test, y_test))
"""
Explanation: It looks like we get a poor fit when only the first and second components are used, and when we increase
the polynomial order and the components together. The models have a high standard deviation and
poor R-squared values for both of these cases.
There seems to be several potential models that use 3 to 6 components. It's difficult to see which model
is the best. Let's use our test data X_test to see which model performs the best.
End of explanation
"""
from pymks.tools import draw_gridscores
gs_deg_1 = [x for x in gs.grid_scores_ \
if x.parameters['degree'] == 1][2:-1]
gs_deg_2 = [x for x in gs.grid_scores_ \
if x.parameters['degree'] == 2][2:-1]
gs_deg_3 = [x for x in gs.grid_scores_ \
if x.parameters['degree'] == 3][2:-1]
draw_gridscores([gs_deg_1, gs_deg_2, gs_deg_3], 'n_components',
data_labels=['1st Order', '2nd Order', '3rd Order'],
colors=['#f46d43', '#1a9641', '#762a83'],
param_label='Number of Components', score_label='R-Squared')
"""
Explanation: For the parameter range that we searched, we have found that a model with 3rd order polynomial
and 3 components had the best R-squared value. It's difficult to see the differences in the score
values and the standard deviation when we have 3 or more components. Let's take a closer look at those values, using draw_grid_scores.
End of explanation
"""
model = gs.best_estimator_
"""
Explanation: As we said, a model with a 3rd order polynomial and 3 components will give us the best result,
but there are several other models that will likely provide comparable results. Let's make the
best model from our grid scores.
End of explanation
"""
model.fit(X, y, periodic_axes=[0, 1])
"""
Explanation: Prediction using MKSHomogenizationModel
Now that we have selected values for n_components and degree, lets fit the model with the data. Again, because
our microstructures are periodic, we need to use the periodic_axes argument.
End of explanation
"""
test_sample_size = 20
n_samples = [test_sample_size] * 6
X_new, y_new = make_elastic_stress_random(n_samples=n_samples, size=size, grain_size=grain_size,
elastic_modulus=elastic_modulus, poissons_ratio=poissons_ratio,
macro_strain=macro_strain, seed=1)
"""
Explanation: Let's generate some more data that can be used to try and validate our model's prediction accuracy. We are going to
generate 20 samples of all six different types of microstructures using the same
make_elastic_stress_random function.
End of explanation
"""
y_predict = model.predict(X_new, periodic_axes=[0, 1])
"""
Explanation: Now let's predict the stress values for the new microstructures.
End of explanation
"""
from pymks.tools import draw_components
draw_components([model.reduced_fit_data[:, :2],
model.reduced_predict_data[:, :2]],
['Training Data', 'Test Data'])
"""
Explanation: We can look to see if the low-dimensional representation of the
new data is similar to the low-dimensional representation of the data
we used to fit the model using draw_components from pymks.tools.
End of explanation
"""
from sklearn.metrics import r2_score
print('R-squared'), (model.score(X_new, y_new, periodic_axes=[0, 1]))
"""
Explanation: The predicted data seems to be reasonably similar to the data we used to fit the model
with. Now let's look at the score value for the predicted data.
End of explanation
"""
print('Actual Stress '), (y_new[::20])
print('Predicted Stress'), (y_predict[::20])
"""
Explanation: Looks pretty good. Let's print out one actual and predicted stress value for each of the 6 microstructure types to see how they compare.
End of explanation
"""
from pymks.tools import draw_goodness_of_fit
fit_data = np.array([y, model.predict(X, periodic_axes=[0, 1])])
pred_data = np.array([y_new, y_predict])
draw_goodness_of_fit(fit_data, pred_data, ['Training Data', 'Test Data'])
"""
Explanation: Lastly, we can also evaluate our prediction by looking at a goodness-of-fit plot. We
can do this by importing draw_goodness_of_fit from pymks.tools.
End of explanation
"""
|
Yangqing/caffe2
|
caffe2/python/tutorials/Loading_Pretrained_Models.ipynb
|
apache-2.0
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
%matplotlib inline
from caffe2.proto import caffe2_pb2
import numpy as np
import skimage.io
import skimage.transform
from matplotlib import pyplot
import os
from caffe2.python import core, workspace, models
import urllib2
import operator
print("Required modules imported.")
"""
Explanation: Loading Pre-Trained Models
Description
In this tutorial, we will use the pre-trained squeezenet model from the ModelZoo to classify our own images. As input, we will provide the path (or URL) to an image we want to classify. It will also be helpful to know the ImageNet object code for the image so we can verify our results. The 'object code' is nothing more than the integer label for the class used during training, for example "985" is the code for the class "daisy". Note, although we are using squeezenet here, this tutorial serves as a somewhat universal method for running inference on pretrained models.
If you came from the Image Pre-Processing Tutorial, you will see that we are using rescale and crop functions to prep the image, as well as reformatting the image to be CHW, BGR, and finally NCHW. We also correct for the image mean by either using the calculated mean from a provided npy file, or statically removing 128 as a placeholder average.
Hopefully, you will find that loading pre-trained models is simple and syntactically concise. From a high level, these are the three required steps for running inference on a pretrained model:
Read the init and predict protobuf (.pb) files of the pretrained model
with open("init_net.pb") as f:
init_net = f.read()
with open("predict_net.pb") as f:
predict_net = f.read()
Initialize a Predictor in your workspace with the blobs from the protobufs
p = workspace.Predictor(init_net, predict_net)
Run the net on some data and get the (softmax) results!
results = p.run({'data': img})
Note, assuming the last layer of the network is a softmax layer, the results come back as a multidimensional array of probabilities with length equal to the number of classes that the model was trained on. The probabilities may be indexed by the object code (integer type), so if you know the object code you can index the results array at that index to view the network's confidence that the input image is of that class.
Model Download Options
Although we will use squeezenet here, you can check out the Model Zoo for pre-trained models to browse/download a variety of pretrained models, or you can use Caffe2's caffe2.python.models.download module to easily acquire pre-trained models from Github caffe2/models.
For our purposes, we will use the models.download module to download squeezenet into the /caffe2/python/models folder of our local Caffe2 installation with the following command:
python -m caffe2.python.models.download -i squeezenet
If the above download worked then you should have a directory named squeezenet in your /caffe2/python/models folder that contains init_net.pb and predict_net.pb. Note, if you do not use the -i flag, the model will be downloaded to your CWD, however it will still be a directory named squeezenet containing two protobuf files. Alternatively, if you wish to download all of the models, you can clone the entire repo using:
git clone https://github.com/caffe2/models
Code
Before we start, lets take care of the required imports.
End of explanation
"""
# Configuration --- Change to your setup and preferences!
# This directory should contain the models downloaded from the model zoo. To run this
# tutorial, make sure there is a 'squeezenet' directory at this location that
# contains both the 'init_net.pb' and 'predict_net.pb'
CAFFE_MODELS = "~/caffe2/caffe2/python/models"
# Some sample images you can try, or use any URL to a regular image.
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Whole-Lemon.jpg/1235px-Whole-Lemon.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/7/7b/Orange-Whole-%26-Split.jpg"
# IMAGE_LOCATION = "https://upload.wikimedia.org/wikipedia/commons/a/ac/Pretzel.jpg"
# IMAGE_LOCATION = "https://cdn.pixabay.com/photo/2015/02/10/21/28/flower-631765_1280.jpg"
IMAGE_LOCATION = "images/flower.jpg"
# What model are we using?
# Format below is the model's: <folder, INIT_NET, predict_net, mean, input image size>
# You can switch 'squeezenet' out with 'bvlc_alexnet', 'bvlc_googlenet' or others that you have downloaded
MODEL = 'squeezenet', 'init_net.pb', 'predict_net.pb', 'ilsvrc_2012_mean.npy', 227
# codes - these help decipher the output and source from a list of ImageNet's object codes
# to provide a result like "tabby cat" or "lemon" depending on what's in the picture
# you submit to the CNN.
codes = "https://gist.githubusercontent.com/aaronmarkham/cd3a6b6ac071eca6f7b4a6e40e6038aa/raw/9edb4038a37da6b5a44c3b5bc52e448ff09bfe5b/alexnet_codes"
print("Config set!")
"""
Explanation: Inputs
Here, we will specify the inputs to be used for this run, including the input image, the model location, the mean file (optional), the required size of the image, and the location of the label mapping file.
End of explanation
"""
# set paths and variables from model choice and prep image
CAFFE_MODELS = os.path.expanduser(CAFFE_MODELS)
# mean can be 128 or custom based on the model
# it gives better results to remove the average color found across the training images
MEAN_FILE = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[3])
if not os.path.exists(MEAN_FILE):
print("No mean file found!")
mean = 128
else:
print ("Mean file found!")
mean = np.load(MEAN_FILE).mean(1).mean(1)
mean = mean[:, np.newaxis, np.newaxis]
print("mean was set to: ", mean)
# some models were trained with different image sizes, this helps you calibrate your image
INPUT_IMAGE_SIZE = MODEL[4]
# make sure all of the files are around...
INIT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[1])
PREDICT_NET = os.path.join(CAFFE_MODELS, MODEL[0], MODEL[2])
# Check to see if the files exist
if not os.path.exists(INIT_NET):
print("WARNING: " + INIT_NET + " not found!")
else:
if not os.path.exists(PREDICT_NET):
print("WARNING: " + PREDICT_NET + " not found!")
else:
print("All needed files found!")
"""
Explanation: Setup paths
With the configs set, we can now load the mean file (if it exists), as well as the predict net and the init net.
End of explanation
"""
# Function to crop the center cropX x cropY pixels from the input image
def crop_center(img,cropx,cropy):
y,x,c = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
# Function to rescale the input image to the desired height and/or width. This function will preserve
# the aspect ratio of the original image while making the image the correct scale so we can retrieve
# a good center crop. This function is best used with center crop to resize any size input images into
# specific sized images that our model can use.
def rescale(img, input_height, input_width):
# Get original aspect ratio
aspect = img.shape[1]/float(img.shape[0])
if(aspect>1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = skimage.transform.resize(img, (input_width, res))
if(aspect<1):
# portrait orientation - tall image
res = int(input_width/aspect)
imgScaled = skimage.transform.resize(img, (res, input_height))
if(aspect == 1):
imgScaled = skimage.transform.resize(img, (input_width, input_height))
return imgScaled
# Load the image as a 32-bit float
# Note: skimage.io.imread returns a HWC ordered RGB image of some size
img = skimage.img_as_float(skimage.io.imread(IMAGE_LOCATION)).astype(np.float32)
print("Original Image Shape: " , img.shape)
# Rescale the image to comply with our desired input size. This will not make the image 227x227
# but it will make either the height or width 227 so we can get the ideal center crop.
img = rescale(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
print("Image Shape after rescaling: " , img.shape)
pyplot.figure()
pyplot.imshow(img)
pyplot.title('Rescaled image')
# Crop the center 227x227 pixels of the image so we can feed it to our model
img = crop_center(img, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE)
print("Image Shape after cropping: " , img.shape)
pyplot.figure()
pyplot.imshow(img)
pyplot.title('Center Cropped')
# switch to CHW (HWC --> CHW)
img = img.swapaxes(1, 2).swapaxes(0, 1)
print("CHW Image Shape: " , img.shape)
pyplot.figure()
for i in range(3):
# For some reason, pyplot subplot follows Matlab's indexing
# convention (starting with 1). Well, we'll just follow it...
pyplot.subplot(1, 3, i+1)
pyplot.imshow(img[i])
pyplot.axis('off')
pyplot.title('RGB channel %d' % (i+1))
# switch to BGR (RGB --> BGR)
img = img[(2, 1, 0), :, :]
# remove mean for better results
img = img * 255 - mean
# add batch size axis which completes the formation of the NCHW shaped input that we want
img = img[np.newaxis, :, :, :].astype(np.float32)
print("NCHW image (ready to be used as input): ", img.shape)
"""
Explanation: Image Preprocessing
Now that we have our inputs specified and have verified the existence of the input network, we can load and pre-process the image for ingestion into a Caffe2 convolutional neural network! This is a very important step, as the trained CNN requires a specifically sized input image whose values come from a particular distribution.
End of explanation
"""
# Read the contents of the input protobufs into local variables
with open(INIT_NET) as f:
init_net = f.read()
with open(PREDICT_NET) as f:
predict_net = f.read()
# Initialize the predictor from the input protobufs
p = workspace.Predictor(init_net, predict_net)
# Run the net and return prediction
results = p.run({'data': img})
# Turn it into something we can play with and examine which is in a multi-dimensional array
results = np.asarray(results)
print("results shape: ", results.shape)
# Quick way to get the top-1 prediction result
# Squeeze out the unnecessary axis. This returns a 1-D array of length 1000
preds = np.squeeze(results)
# Get the prediction and the confidence by finding the maximum value and index of maximum value in preds array
curr_pred, curr_conf = max(enumerate(preds), key=operator.itemgetter(1))
print("Prediction: ", curr_pred)
print("Confidence: ", curr_conf)
"""
Explanation: Prepare the CNN and run the net!
Now that the image is ready to be ingested by the CNN, let's open the protobufs, load them into the workspace, and run the net.
End of explanation
"""
# the rest of this is digging through the results
results = np.delete(results, 1)
index = 0
highest = 0
arr = np.empty((0,2), dtype=object)
arr[:,0] = int(10)
arr[:,1:] = float(10)
for i, r in enumerate(results):
# imagenet index begins with 1!
i=i+1
arr = np.append(arr, np.array([[i,r]]), axis=0)
if (r > highest):
highest = r
index = i
# top N results
N = 5
topN = sorted(arr, key=lambda x: x[1], reverse=True)[:N]
print("Raw top {} results: {}".format(N,topN))
# Isolate the indexes of the top-N most likely classes
topN_inds = [int(x[0]) for x in topN]
print("Top {} classes in order: {}".format(N,topN_inds))
# Now we can grab the code list and create a class Look Up Table
response = urllib2.urlopen(codes)
class_LUT = []
for line in response:
code, result = line.partition(":")[::2]
code = code.strip()
result = result.replace("'", "")
if code.isdigit():
class_LUT.append(result.split(",")[0][1:])
# For each of the top-N results, associate the integer result with an actual class
for n in topN:
print("Model predicts '{}' with {}% confidence".format(class_LUT[int(n[0])],float("{0:.2f}".format(n[1]*100))))
"""
Explanation: Process Results
Recall that ImageNet is a 1000-class dataset, so it is no coincidence that the third axis of results has length 1000. This axis holds the probability for each category in the pre-trained model. When you look at the results array at a specific index, the number can be interpreted as the probability that the input belongs to the class corresponding to that index. Now that we have run the predictor and collected the results, we can interpret them by matching them to their corresponding English labels.
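As an aside, a more compact way to pull out the top-N indices is np.argsort; this is only a sketch and not the approach used in this tutorial's code, and note that argsort returns 0-based positions while the tutorial shifts to ImageNet's 1-based codes:
topN_inds_alt = np.argsort(preds)[::-1][:5]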
End of explanation
"""
# List of input images to be fed
images = ["images/cowboy-hat.jpg",
"images/cell-tower.jpg",
"images/Ducreux.jpg",
"images/pretzel.jpg",
"images/orangutan.jpg",
"images/aircraft-carrier.jpg",
"images/cat.jpg"]
# Allocate space for the batch of formatted images
NCHW_batch = np.zeros((len(images),3,227,227))
print ("Batch Shape: ",NCHW_batch.shape)
# For each of the images in the list, format it and place it in the batch
for i,curr_img in enumerate(images):
img = skimage.img_as_float(skimage.io.imread(curr_img)).astype(np.float32)
img = rescale(img, 227, 227)
img = crop_center(img, 227, 227)
img = img.swapaxes(1, 2).swapaxes(0, 1)
img = img[(2, 1, 0), :, :]
img = img * 255 - mean
NCHW_batch[i] = img
print("NCHW image (ready to be used as input): ", NCHW_batch.shape)
# Run the net on the batch
results = p.run([NCHW_batch.astype(np.float32)])
# Turn it into something we can play with and examine which is in a multi-dimensional array
results = np.asarray(results)
# Squeeze out the unnecessary axis
preds = np.squeeze(results)
print("Squeezed Predictions Shape, with batch size {}: {}".format(len(images),preds.shape))
# Describe the results
for i,pred in enumerate(preds):
print("Results for: '{}'".format(images[i]))
# Get the prediction and the confidence by finding the maximum value
# and index of maximum value in preds array
curr_pred, curr_conf = max(enumerate(pred), key=operator.itemgetter(1))
print("\tPrediction: ", curr_pred)
print("\tClass Name: ", class_LUT[int(curr_pred)])
print("\tConfidence: ", curr_conf)
"""
Explanation: Feeding Larger Batches
Above is an example of how to feed one image at a time. We can achieve higher throughput if we feed multiple images at a time in a single batch. Recall, the data fed into the classifier is in 'NCHW' order, so to feed multiple images, we will expand the 'N' axis.
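As a minimal sketch (img_a and img_b are hypothetical names for two images that have already been preprocessed to CHW/BGR as above), stacking along a new first axis yields the NCHW batch directly:
batch = np.stack([img_a, img_b]).astype(np.float32)   # shape (2, 3, 227, 227)
results = p.run([batch])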
End of explanation
"""
|
Tsiems/machine-learning-projects
|
In_Class/ICA3_MachineLearning.ipynb
|
mit
|
# fetch the dataset
from sklearn.datasets import fetch_kddcup99
from sklearn import __version__ as sklearn_version
print('Sklearn Version:',sklearn_version)
ds = fetch_kddcup99(subset='http')
import numpy as np
# get some of the specifics of the dataset
X = ds.data
y = ds.target != b'normal.'
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
print("n_samples: {}".format(n_samples))
print("n_features: {}".format(n_features))
print("n_classes: {}".format(n_classes))
"""
Explanation: Enter Team Member Names here (double click to edit):
Name 1: Ian Johnson
Name 2: Travis Siems
Name 3: Derek Phanekham
In Class Assignment Three
In the following assignment you will be asked to fill in python code and derivations for a number of different problems. Please read all instructions carefully and turn in the rendered notebook (or HTML of the rendered notebook) before the end of class (or right after class). The initial portion of this notebook is given before class and the remainder is given during class. Please answer the initial questions before class, to the best of your ability. Once class has started you may rework your answers as a team for the initial part of the assignment.
<a id="top"></a>
Contents
<a href="#LoadingKDD">Loading KDDCup Data</a>
<a href="#kdd_eval">KDDCup Evaluation and Cross Validation</a>
<a href="#data_snooping">More Cross Validation</a>
<a href="#stats">Statistical Comparison</a>
Before coming to class, please make sure you have the latest version of scikit-learn. This notebook was created for version 0.18 and higher.
<a id="LoadingKDD"></a>
<a href="#top">Back to Top</a>
Loading KDDCup Data
Please run the following code to read in the "KDD Cup" dataset from sklearn's data loading module. It consists of examples of different simulated attacks for the 1998 DARPA Intrusion Detection System (IDS).
This will load the data into the variable ds. ds is a bunch object with fields like ds.data and ds.target. The field ds.data is a numpy matrix of the continuous features in the dataset. The object is not a pandas dataframe. It is a numpy matrix. Each row is an observed instance and each column is a different feature. It also has a field called ds.target, which holds the label we are trying to predict (here, whether a connection is an attack or normal traffic). Each entry in ds.target is a label for the corresponding row of the ds.data matrix.
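As a quick sanity check of the bunch (a sketch), you can print the shape of the feature matrix and the unique raw labels before the conversion to a binary target:
print(ds.data.shape)
print(np.unique(ds.target))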
End of explanation
"""
from matplotlib import pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
#=== Fill in code below========
print('Total number of instances', len(y))
print('Number of instances in each class:',np.bincount(y))
vals = np.bincount(y)
plt.bar(range(len(vals)), vals)
plt.ylabel('Counts')
plt.title('Counts of Normal and Abnormal Events')
plt.xticks(range(2), ('Normal','Abnormal'))
"""
Explanation: Question 1: How many instances are in the binary classification problem loaded above? How many instances are in each class? Plot a pie chart or bar chart of the number of classes.
End of explanation
"""
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
# select model
clf = LogisticRegression()
#select cross validation
cv = KFold(n_splits=10)
# select evaluation criteria
my_scorer = make_scorer(accuracy_score)
# run model training and cross validation
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=cv,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
"""
Explanation: <a id="kdd_eval"></a>
<a href="#top">Back to Top</a>
KDDCup Evaluation and Cross Validation
End of explanation
"""
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, ShuffleSplit
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.linear_model import LogisticRegression
# these imports above might help you
#=====Write your code below here=================
# select model
clf = LogisticRegression()
#select cross validation
cv = StratifiedKFold(n_splits=10) ##CHANGED TO STRATIFIED K FOLD
# select evaluation criteria
my_scorer = make_scorer(f1_score) ##SWITCHED TO F1_SCORE
# run model training and cross validation
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=cv,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
print("Mean F1: ", np.mean(per_fold_eval_criteria))
"""
Explanation: Question 2 Is the code above a proper separation of training and testing sets for the given dataset? Why or why not?
For this dataset, due to the class imbalance, stratified K fold partitioning should be used to guarantee that each of the folds actually includes examples of each class. A normal K-fold partitioning will result in only a handful of folds including any of the abnormal "attack" messages.
Question 3: Is the evaluation metric chosen in the above code appropriate for the dataset? Why or Why not?
No, accuracy is not a good metric in this case because there is a significant class imbalance problem. Accuracy is not meaningful because there are so many normal messages in the data and very few abnormal messages in the data.
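To make the imbalance concrete, consider a trivial classifier that always predicts 'normal'. Its recall, and therefore its F1 score, on the attack class is exactly 0, yet its accuracy equals the share of normal traffic (a sketch):
baseline_accuracy = 1 - np.mean(y)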
Exercise 1: If the code above is not a proper separation of the train or does not use the proper evaluation criteria, fix the code in the block below to use appropriate train/test separation and appropriate evaluation criterion (criteria).
End of explanation
"""
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
#======If there are errors, fix them below======
n_components = 1
##REMOVED THE PCA FROM HERE AND PUT IT IN THE PIPELINE
##We also removed the StandardScaler, since PCA performs scaling internally
clf = Pipeline([
('pca',PCA(n_components=n_components)),
('clf',LogisticRegression())])
per_fold_eval_criteria = cross_val_score(estimator=clf,
X=X,
y=y,
cv=cv,
scoring=my_scorer
)
plt.bar(range(len(per_fold_eval_criteria)),per_fold_eval_criteria)
plt.ylim([min(per_fold_eval_criteria)-0.01,max(per_fold_eval_criteria)])
# =====fixed code======
# write the fixed code (if needed) below
#We put the PCA inside of the pipeline, as that is where it should be, and we removed the StandardScaler, because it's redundant
"""
Explanation: Question 4: Does the learning algorithm perform well based on the evaluation criteria? Why or why not?
Yes, the algorithm performs well, as the mean F1 score is 0.9968. Because the F1 score is a weighted average of the recall scores and precision scores, this high F1 score means that we have a very small number of not only Type I errors, but also Type II errors, relative to the number of actual positive and negative instances in the dataset.
<a id="data_snooping"></a>
<a href="#top">Back to Top</a>
More Cross Validation
Exercise 2: Does the code below contain any errors in the implementation of the cross validation? If so, fix the code below.
End of explanation
"""
#plotting function for use in next question
# takes input 'test_scores', and an x-axis label
def plot_filled(test_scores,train_x_axis, xlabel=''):
test_mean = np.percentile(test_scores,50, axis=1)
test_max = np.percentile(test_scores,95, axis=1)
test_min = np.percentile(test_scores,5, axis=1)
plt.plot(train_x_axis, test_mean,
color='blue', linestyle='--',
marker='s', markersize=5,
label='validation set')
plt.fill_between(train_x_axis,
test_min,
test_max,
alpha=0.15, color='blue')
plt.grid(True)
plt.xlabel(xlabel)
plt.ylabel('Evaluation Criterion')
plt.legend(loc='lower right')
plt.tight_layout()
"""
Explanation: For this question, the circumstances for the DARPA KDD99 cup are changed in the following way:
- When the model for detecting attacks is deployed, we now think that it will often need to be retrained.
- DARPA anticipates that there will be a handful of different style attacks on their systems that have never been seen before. To detect these new attacks, they are employing programmers and analysts to find them manually every day.
- DARPA believes the perpetrators of these new attacks are more sophisticated, so finding the new attacks will take priority over detecting the older, known attacks.
- DARPA wants to use your learning algorithm for detecting only these new attacks, but the amount of training and testing data will be extremely small, because the analysts can only identify a handful of new style attacks each day.
- DARPA asks you if you think its a good idea to employ retraining your model each day to find these new attacks.
Question 5: How would you change the method of cross validation to answer this question from DARPA? That is, how can you change your cross validation method to better mirror how your system will be used and deployed by DARPA?
We would use the time-series approach discussed in the flipped lecture, where we use all existing data to try to classify the new data from every day.
At day 0 (the day that we start identifying the new type of attack), we would build a new dataset consisting of all of the new messages, including the new sophisticated attacks, which we would label as sophisticated attacks.
At day N, we would use all of the collected data from days 0..N (not including N) to build a model to classify the data from day N. We would then compare the model's output (for predicting day N) to the actual measured classes of the data from day N. We would compute the F1 score of the model with respect to identifying only sophisticated attacks, and use that to evaluate the model, or we could use a cost matrix where the cost of misidentifying a sophisticated attack is much higher than the cost of misidentifying any other type of attack.
We believe that we should re-train the model every day, because we have so little data about the new attacks that leaving any of it out of the model would be a waste.
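A rough sketch of this walk-forward evaluation (day_index, y_new and n_days are hypothetical: an array assigning each instance to a day, the analyst-provided labels for the new attacks, and the number of days observed):
for day in range(1, n_days):
    train_mask = day_index < day
    test_mask = day_index == day
    clf = LogisticRegression().fit(X[train_mask], y_new[train_mask])
    print(day, f1_score(y_new[test_mask], clf.predict(X[test_mask])))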
End of explanation
"""
clf = LogisticRegression()
test_scores = []
train_sizes=np.linspace(5e-4,5e-3,10)
for size in train_sizes:
cv = StratifiedShuffleSplit(n_splits=100,
train_size = size,
test_size = 1-size,
)
test_scores.append(cross_val_score(estimator=clf,X=X,y=y,cv=cv,scoring=my_scorer))
plot_filled(np.array(test_scores), train_sizes*100, 'Percentage training data (%)')
print(.0015 * len(X))
"""
Explanation: DARPA is also concerned about how much training data they will need from the analysts in order to have a high performing model. They would like to use the current dataset to help answer that question. The code below is written for you to help answer DARPA's question about how many examples will be needed for training. Examine the code and then answer the following question:
Question 6: Based on the analysis graphed below, how many positive examples are required to have a good tradeoff between bias and variance for the given evaluation criteria? Why?
End of explanation
"""
clf1 = LogisticRegression(C=100)
clf2 = LogisticRegression(C=1)
clf3 = LogisticRegression(C=0.1)
train_size = 0.003 # small training size
cv = StratifiedShuffleSplit(n_splits=10,train_size=train_size,test_size=1-train_size)
evals1 = cross_val_score(estimator=clf1,X=X,y=y,scoring=my_scorer,cv=cv)
evals2 = cross_val_score(estimator=clf2,X=X,y=y,scoring=my_scorer,cv=cv)
evals3 = cross_val_score(estimator=clf3,X=X,y=y,scoring=my_scorer,cv=cv)
"""
Explanation: It seems that approximately 0.15% of the data must be comprised of positive examples in order to optimize the tradeoff between vias and variance.
For the entire dataset, this means about 88 examples.
<a id="stats"></a>
<a href="#top">Back to Top</a>
Statistical Comparison
Now lets create a few different models and see if any of them have statistically better performances.
We are creating three different classifiers below to compare to one another. For creating different training and testing splits, we are using stratified shuffle splits on the datasets.
End of explanation
"""
#===================================================
# Enter your code below
diff12 = evals1 - evals2
diff13 = evals1 - evals3
diff23 = evals2 - evals3
# sample standard deviation of the paired per-fold differences (k = 10, t = 2.26)
sigma12 = np.sqrt(np.sum((diff12 - np.mean(diff12))**2) / (10-1))
sigma13 = np.sqrt(np.sum((diff13 - np.mean(diff13))**2) / (10-1))
sigma23 = np.sqrt(np.sum((diff23 - np.mean(diff23))**2) / (10-1))
# 95% confidence intervals on the mean difference: d_bar +/- t * sigma / sqrt(k)
d12 = (np.mean(diff12) - 2.26 * sigma12 / np.sqrt(10), np.mean(diff12) + 2.26 * sigma12 / np.sqrt(10))
d13 = (np.mean(diff13) - 2.26 * sigma13 / np.sqrt(10), np.mean(diff13) + 2.26 * sigma13 / np.sqrt(10))
d23 = (np.mean(diff23) - 2.26 * sigma23 / np.sqrt(10), np.mean(diff23) + 2.26 * sigma23 / np.sqrt(10))
print('Models 1 and 2 have statistically the best F1_score with 95% confidence (compared to model 3)')
print('Models 1 and 2 do not have statistically different F1 scores with 95% confidence.')
#===================================================
"""
Explanation: Question 7: Given the code above, what statistical test is more appropriate for selecting confidence intervals, and why? Your options are:
- A: approximating the evaluation criterion as a binomial distribution and bounding by the variance (the first option we used in the flipped lecture video)
- B: approximating the bounds using the folds of the cross validation to get mean and variance (the second option we used in the flipped lecture video)
- C: Either are acceptable statistical tests for obtaining confidence intervals
Answer: B
A) is not an acceptable answer, because, since the training size is so small, the testing sets are not independent, and the binomial approximation requires that the datasets are independent.
B) is an acceptable answer, because we have 3 sets of scores for the three classifiers, so we can compute the mean and variance of the difference between these sets. This is the only acceptable test in this scenario.
C) is not an acceptable answer, since A is incorrect.
Final Exercise: With 95% confidence, perform the statistical test that you selected above. Is any model or set of models statistically the best performer(s)? Or can we not say if the models are different with greater than 95% confidence?
If you chose option A, use a multiplier of Z=1.96. The number of instances used in testing can be calculated from the variable train_size.
If you chose option B, use a multiplier of t=2.26 and k=10.
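As a reminder of the interval form for option B: with k paired per-fold differences d_i between two models, the bound is d_bar +/- t * s_d / sqrt(k), where d_bar is the mean difference, s_d is the sample standard deviation of the d_i, t = 2.26 and k = 10.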
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
dev/_downloads/a179627fc73cce931ace004638e9685c/read_inverse.ipynb
|
bsd-3-clause
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator
from mne.viz import set_3d_view
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects'
meg_path = data_path / 'MEG' / 'sample'
fname_trans = meg_path / 'sample_audvis_raw-trans.fif'
inv_fname = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(inv_fname)
print("Method: %s" % inv['methods'])
print("fMRI prior: %s" % inv['fmri_prior'])
print("Number of sources: %s" % inv['nsource'])
print("Number of channels: %s" % inv['nchan'])
src = inv['src'] # get the source space
# Get access to the triangulation of the cortex
print("Number of vertices on the left hemisphere: %d" % len(src[0]['rr']))
print("Number of triangles on left hemisphere: %d" % len(src[0]['use_tris']))
print("Number of vertices on the right hemisphere: %d" % len(src[1]['rr']))
print("Number of triangles on right hemisphere: %d" % len(src[1]['use_tris']))
"""
Explanation: Reading an inverse operator
The inverse operator's source space is shown in 3D.
End of explanation
"""
fig = mne.viz.plot_alignment(subject='sample', subjects_dir=subjects_dir,
trans=fname_trans, surfaces='white', src=src)
set_3d_view(fig, focalpoint=(0., 0., 0.06))
"""
Explanation: Show the 3D source space
End of explanation
"""
|
cipri-tom/Swiss-on-Amazon
|
analyse_swiss_reviews.ipynb
|
gpl-3.0
|
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
from ggplot import *
plt.style.use('seaborn-whitegrid')
plt.style.use('seaborn-notebook')
#['grayscale', 'fivethirtyeight', 'seaborn-deep', 'bmh', 'seaborn-poster', 'seaborn-ticks', 'seaborn-dark', 'seaborn-darkgrid', 'seaborn-whitegrid', 'seaborn-white', 'seaborn-bright', 'seaborn-paper', 'seaborn-pastel', 'seaborn-colorblind', 'seaborn-notebook', 'seaborn-dark-palette', 'dark_background', 'ggplot', 'seaborn-muted', 'seaborn-talk', 'classic']
"""
Explanation: Comparing Switzerland to the world on Amazon
We found these statistics about the Amazon dataset at http://minimaxir.com/2017/01/amazon-spark/ (credits: Max Woolf). Based on his work, we ran the same analysis using only Swiss reviews and compared the results.
End of explanation
"""
def parse(path):
f = open(path, 'r')
for l in f:
yield eval(l)
f.close()
def getDF(path):
i = 0
df = {}
for d in parse(path):
df[i] = d
i += 1
return pd.DataFrame.from_dict(df, orient='index')
df_products = getDF('data/swiss_products.json')
df_products = df_products.sort_values(['brand', 'price'])
df_products[df_products['brand'] == 'victorinox']
"""
Explanation: 1 Explore products dataset
We start by looking at the product data without the reviews.
Load the product data in a dataframe
End of explanation
"""
df_products.brand.value_counts()
"""
Explanation: Our data contains false positives. We found no way of filtering them out besides a quick manual check.
End of explanation
"""
false_positives = ['samsonite', 'mazda', 'lacoste', 'pelikan', 'bell']
df_products = df_products[~df_products.brand.isin(false_positives)]
"""
Explanation: We decided to remove the obvious ones
End of explanation
"""
for column_name in df_products.columns:
print("NA values for " + column_name + ": " + str(df_products[column_name].isnull().sum()))
"""
Explanation: Let's take a look at the data. We start by detecting NA values
End of explanation
"""
df_products['salesrank'].fillna(value='{}', inplace=True)
df_products['related'].fillna(value='{}', inplace=True)
df_products['description'].fillna(value='', inplace=True)
av_price = df_products['price'].mean()
df_products['price'].fillna(value=av_price, inplace=True)
av_price
"""
Explanation: We fill NA values as follows: empty for salesrank, related and description, and the average price for price
End of explanation
"""
df_products[df_products.price == max(df_products['price'])].iloc[0].asin
from IPython.display import Image
Image(filename='data/expensivewatch.jpg')
"""
Explanation: The average price of the Swiss products is $79.73.
Let's look at the most expensive product on Amazon
End of explanation
"""
df_products.price.describe()
"""
Explanation: Let's look at some statistics about the products
End of explanation
"""
df_products.brand.value_counts()[:20].plot(kind='bar', figsize=(10,5), title="Number of occurences of brands")
"""
Explanation: Which brand occurs the most in our dataset?
End of explanation
"""
# Which category occurs the most
flatten = lambda l: [item for sublist in l for item in sublist]
categories_list = []
for categories in df_products.categories:
for categorie in flatten(categories):
categories_list.append(categorie)
pd.Series(categories_list).value_counts()[:20].plot(kind='bar', figsize=(10,5), title="Number of occurences of categories")
"""
Explanation: Which category occurs the most?
End of explanation
"""
df = pd.read_csv("data/amazon_ratings.csv")
df.head(10)
"""
Explanation: 2. Explore review dataset
transform data
End of explanation
"""
def get_category(x):
while 'u\'' in x:
x = x.replace('u\'','\'')
for y in eval(x):
for first in y:
return first
df['category'] = df['category'].apply(get_category)
df.head(10)
"""
Explanation: Transform categories into list
End of explanation
"""
df = df.sort_values(['user_id', 'timestamp'])
df[10:20]
df['nth_user'] = 1
user_id = ''
counter = 1
for i in range(0,df.shape[0]):
if df.iloc[i].user_id != user_id:
counter = 1
user_id = df.iloc[i].user_id
else:
counter += 1
df.set_value(i, 'nth_user', counter)
df[10:20]
"""
Explanation: Number each review according to whether it is the user's n-th review
End of explanation
"""
df = df.sort_values(['item_id', 'timestamp']).reset_index(drop=True)
df.head()
df['nth_product'] = 1
item_id = ''
counter = 1
for i in range(0,df.shape[0]):
if df.iloc[i].item_id != item_id:
counter = 1
item_id = df.iloc[i].item_id
else:
counter += 1
df.set_value(i, 'nth_product', counter)
df.head()
"""
Explanation: Number each review according to whether it is the product's n-th review
End of explanation
"""
def get_time(x):
return datetime.datetime.fromtimestamp(
int(x)
).strftime('%Y-%m-%d %H:%M:%S')
df['time'] = df['timestamp'].apply(get_time)
df.head(10)
"""
Explanation: Transform date into readable date
End of explanation
"""
df_ratings = df.groupby(df.rating).count().ix[:,0]
df_ratings
df_ratings.plot.bar()
"""
Explanation: 3. Analysis
How many times each rating is given?
End of explanation
"""
def percentage_rating(year):
df_year = df[df.time.str.contains(year)]
counts_year = df_year.groupby(['rating']).count().ix[:,0]
print(df_year.shape)
counts_year = counts_year/sum(counts_year)
ret = pd.DataFrame(counts_year)
ret.columns = [year]
return ret.transpose()
percentage_rating('1999')
ax = pd.concat([percentage_rating('1999'), percentage_rating('2014')]).transpose().plot.bar()
ax.set_title("Percentage of ratings given per year", fontsize=18)
ax.set_xlabel("Rating given", fontsize=16)
ax.set_ylabel("Percentage of total reviews", fontsize=16)
ax.legend(loc=2,prop={'size':14})
"""
Explanation: Do ratings evolve over time?
End of explanation
"""
df_user_review_counts = df.groupby(df.user_id)
counts = df_user_review_counts.count().ix[:,0].value_counts().sort_index()
counts
num_reviews = []
prop = []
s = 0
for i in range(0,counts.shape[0]):
num_reviews.append(counts.index[i])
s += counts.iloc[i]/sum(counts)
prop.append(s)
df_counts = pd.DataFrame(
{'num_reviews': num_reviews,
'prop': prop
})
df_counts.head()
"""
Explanation: 4. Comparison to the world
We will now make plots comparing the Swiss products to the analysis of Max Woolf mentioned earlier.
End of explanation
"""
ax = df_counts.plot(x='num_reviews', y='prop', figsize=(10,7),
color="#2980b9", legend=None)
ax.set_title("Cumulative Proportion of # Amazon Reviews Given by User", fontsize=18)
ax.set_xlabel("# Reviews Given By User", fontsize=14)
ax.set_ylabel("Cumulative Proportion of All Amazon Reviewers", fontsize=14)
ax.set_ylim((0,1.1))
ax.yaxis.set_ticks(np.arange(0, 1.1, 0.25))
ax.set_xlim((0,50))
df_user_review_counts = df.groupby(df.item_id)
counts = df_user_review_counts.count().ix[:,0].value_counts().sort_index()
counts
num_reviews = []
prop = []
s = 0
for i in range(0,counts.shape[0]):
num_reviews.append(counts.index[i])
s += counts.iloc[i]/sum(counts)
prop.append(s)
df_counts = pd.DataFrame(
{'num_reviews': num_reviews,
'prop': prop
})
df_counts.head()
"""
Explanation: This graph shows the cumulative distribution of the number of reviews a user gives for Swiss products.
End of explanation
"""
ax = df_counts.plot(x='num_reviews', y='prop', figsize=(10,7),
color="#27ae60", legend=None)
ax.set_title("Cumulative Proportion of # Reviews Given For Product", fontsize=18)
ax.set_xlabel("# Reviews Given For Product", fontsize=14)
ax.set_ylabel("Cumulative Proportion of All Amazon Products", fontsize=14)
ax.set_ylim((0,1.1))
ax.yaxis.set_ticks(np.arange(0, 1.1, 0.25))
ax.set_xlim((0,50))
df_user_review_counts = df.groupby(df.category)
counts = df_user_review_counts.count().ix[:,0]
categories = pd.DataFrame({'avg_rating': df_user_review_counts.agg({'rating': 'mean'})['rating'],
'count': counts})
categories
"""
Explanation: This graph shows the cumulative distribution of the number of reviews a Swiss product receives.
End of explanation
"""
# [((categories.index == "baby products") | (categories.index == "office & school supplies"))]
cats = categories['avg_rating'].sort_values(ascending=True)
ax = cats.plot.barh(width=1.0, color = "#e67e22", alpha=0.9, figsize=(6, 16))
ax.set_title("Average Rating Score Given For Amazon Reviews, by Product Category", fontsize = 16)
ax.set_xlabel("Avg. Rating For Reviews Given in Category", fontsize = 14)
ax.set_ylabel("Category", fontsize = 14)
for i in range(0,cats.shape[0]):
height = cats[i]
ax.text(height-1/3, i-1/4,
'%.2f' % height,
ha='center', va='bottom', color = 'white', fontweight='bold', size=16)
"""
Explanation: This graph shows the average rating by product category.
End of explanation
"""
counts = df.groupby(df.user_id).count().ix[:,0]
avg_rating = df.groupby(df.user_id).agg({'rating': 'mean'})['rating']
users = pd.DataFrame({'avg_rating': avg_rating,
'count_reviews': counts})
users = users[users.count_reviews > 4]
users['avg_rating'] = round(users['avg_rating'],1)
users.head()
counts = users['avg_rating'].value_counts()
counts = pd.DataFrame(counts).sort_index()
counts2 = counts.reset_index()
counts2.head()
av = sum(counts2['index']*counts2['avg_rating'])/ sum(counts2['avg_rating'])
av
"""
Explanation: Distribution of ratings
End of explanation
"""
ax = counts.plot.bar(figsize=(8,5), color ='#2980b9', legend=None)
ax.axvline(counts2[counts2['index']==round(av,1)].index, color='k', linestyle='--')
ax.set_title("Distribution of Average Ratings by User, for Amazon Products")
ax.set_xlabel("Average Rating for Amazon Products Given By User (5 Ratings Minimum)")
ax.set_ylabel("Count of Users")
ax.set_xticks([0,7,16,26,36])
ax.set_xticklabels(['1','2','3','4','5'])
counts = df.groupby(df.item_id).count().ix[:,0]
avg_rating = df.groupby(df.item_id).agg({'rating': 'mean'})['rating']
products = pd.DataFrame({'avg_rating': avg_rating,
'count_reviews': counts})
products = products[products.count_reviews > 4]
products['avg_rating'] = round(products['avg_rating'],1)
products.head()
counts = products['avg_rating'].value_counts()
counts = pd.DataFrame(counts).sort_index()
counts2 = counts.reset_index()
counts2.head()
av = sum(counts2['index']*counts2['avg_rating'])/ sum(counts2['avg_rating'])
av
"""
Explanation: This graph illustrates the average score a user gives (provided that they reviewed at least 5 products in our dataset).
End of explanation
"""
ax = counts.plot.bar(figsize=(10,6), color ='#27ae60', legend=None, width = 1)
ax.axvline(counts2[counts2['index']==round(av,1)].index, color='k', linestyle='--')
ax.set_title("Distribution of Overall Ratings on Amazon Products", fontsize = 18)
ax.set_xlabel("Overall Amazon Product Ratings (5 Ratings Minimum)", fontsize = 14)
ax.set_ylabel("Count of Products", fontsize = 14)
ax.set_xticks([0,9,19,29,39])
ax.set_xticklabels(['1.1','2','3','4','5'])
df_breakdown_users =df[df.nth_user <= 50].groupby(['nth_user', 'rating']).count()
df_breakdown_users = df_breakdown_users.reset_index(level=1)
df_breakdown_users.head()
s = 0
df2 = pd.DataFrame({'rating_5':[], 'rating_4':[], 'rating_3':[], 'rating_2':[], 'rating_1':[]})
for i in range(1,51):
s = sum(df_breakdown_users[df_breakdown_users.index == i]['user_id'])
df2 = df2.append(pd.DataFrame({'rating_5':[0],
'rating_4':[0],
'rating_3':[0],
'rating_2':[0],
'rating_1':[0]},
index = [i]))
for j in range(1,6):
result = df_breakdown_users[(df_breakdown_users.index == i) & (df_breakdown_users.rating == j*1.0)]
if not result.empty:
df2.set_value(i,'rating_'+str(j), result.user_id/s)
df2 = df2[["rating_5","rating_4","rating_3","rating_2","rating_1"]]
df2.head()
"""
Explanation: This graph illustrates the average score a product has (provided that it has at least 5 reviews).
End of explanation
"""
ax = df2.plot.bar(stacked=True, color = ['#003a09', '#049104', '#d36743', '#dd310f', '#870202'], width = 0.9)
ax.set_xticks([10,20,30,40,50])
ax.set_xticklabels(['10','20','30','40','50'])
ax.set_title("Breakdown of Amazon Ratings Given by Users, by n-th Rating Given\n\n")
ax.set_xlabel("n-th Rating Given by User")
ax.set_ylabel("Proportion")
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.)
ax.set_ylim((0,1))
df_breakdown_users =df[df.nth_product <= 50].groupby(['nth_product', 'rating']).count()
df_breakdown_users = df_breakdown_users.reset_index(level=1)
df_breakdown_users.head()
s = 0
df2 = pd.DataFrame({'rating_5':[], 'rating_4':[], 'rating_3':[], 'rating_2':[], 'rating_1':[]})
for i in range(1,51):
s = sum(df_breakdown_users[df_breakdown_users.index == i]['item_id'])
df2 = df2.append(pd.DataFrame({'rating_5':[0],
'rating_4':[0],
'rating_3':[0],
'rating_2':[0],
'rating_1':[0]},
index = [i]))
for j in range(1,6):
result = df_breakdown_users[(df_breakdown_users.index == i) & (df_breakdown_users.rating == j*1.0)]
if not result.empty:
df2.set_value(i,'rating_'+str(j), result.user_id/s)
df2 = df2[["rating_5","rating_4","rating_3","rating_2","rating_1"]]
df2.head()
"""
Explanation: This graph shows how the ratings a person gives evolve over time on average.
End of explanation
"""
ax = df2.plot.bar(stacked=True, color = ['#003a09', '#049104', '#d36743', '#dd310f', '#870202'], width = 0.9)
ax.set_xticks([10,20,30,40,50])
ax.set_xticklabels(['10','20','30','40','50'])
ax.set_title("Breakdown of Ratings for Amazon Products by n-th Item Rating Given\n\n")
ax.set_xlabel("n-th Review for Item Given")
ax.set_ylabel("Proportion")
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.)
ax.set_ylim((0,1))
"""
Explanation: This graph shows how the ratings a product receives evolve over time on average.
End of explanation
"""
|
seg/2016-ml-contest
|
LA_Team/Facies_classification_LA_TEAM_07.ipynb
|
apache-2.0
|
%%sh
pip install pandas
pip install scikit-learn
pip install tpot
from __future__ import print_function
import numpy as np
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold , StratifiedKFold
from classification_utilities import display_cm, display_adj_cm
from sklearn.metrics import confusion_matrix, f1_score
from sklearn import preprocessing
from sklearn.model_selection import LeavePGroupsOut
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.signal import medfilt
"""
Explanation: Facies classification using Machine Learning
LA Team Submission 5 ##
Lukas Mosser, Alfredo De la Fuente
In this approach for solving the facies classification problem ( https://github.com/seg/2016-ml-contest ) we will explore the following strategies:
- Features Exploration: based on Paolo Bestagini's work, we will consider imputation, normalization and augmentation routines for the initial features.
- Model tuning:
Libraries
We will need to install the following libraries and packages.
End of explanation
"""
#Load Data
data = pd.read_csv('../facies_vectors.csv')
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
print(data.head())
data['PE'] = data.groupby("Facies").PE.transform(lambda x: x.fillna(x.mean()))
"""
Explanation: Data Preprocessing
End of explanation
"""
# X_train, X_test, y_train, y_test = train_test_split(X_reg, y_reg, train_size=0.75, test_size=0.25)  # disabled: X_reg and y_reg are not defined in this notebook
# Store features and labels
X = data[feature_names].values
y = data['Facies'].values
# Store well labels and depths
well = data['Well Name'].values
depth = data['Depth'].values
# Feature windows concatenation function
def augment_features_window(X, N_neig):
# Parameters
N_row = X.shape[0]
N_feat = X.shape[1]
# Zero padding
X = np.vstack((np.zeros((N_neig, N_feat)), X, (np.zeros((N_neig, N_feat)))))
# Loop over windows
X_aug = np.zeros((N_row, N_feat*(2*N_neig+1)))
for r in np.arange(N_row)+N_neig:
this_row = []
for c in np.arange(-N_neig,N_neig+1):
this_row = np.hstack((this_row, X[r+c]))
X_aug[r-N_neig] = this_row
return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth):
# Compute features gradient
d_diff = np.diff(depth).reshape((-1, 1))
d_diff[d_diff==0] = 0.001
X_diff = np.diff(X, axis=0)
X_grad = X_diff / d_diff
# Compensate for last missing value
X_grad = np.concatenate((X_grad, np.zeros((1, X_grad.shape[1]))))
return X_grad
# Feature augmentation function
def augment_features(X, well, depth, N_neig=1):
# Augment features
X_aug = np.zeros((X.shape[0], X.shape[1]*(N_neig*2+2)))
for w in np.unique(well):
w_idx = np.where(well == w)[0]
X_aug_win = augment_features_window(X[w_idx, :], N_neig)
X_aug_grad = augment_features_gradient(X[w_idx, :], depth[w_idx])
X_aug[w_idx, :] = np.concatenate((X_aug_win, X_aug_grad), axis=1)
# Find padded rows
padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
return X_aug, padded_rows
X_aug, padded_rows = augment_features(X, well, depth)
# Initialize model selection methods
lpgo = LeavePGroupsOut(2)
# Generate splits
split_list = []
for train, val in lpgo.split(X, y, groups=data['Well Name']):
hist_tr = np.histogram(y[train], bins=np.arange(len(facies_names)+1)+.5)
hist_val = np.histogram(y[val], bins=np.arange(len(facies_names)+1)+.5)
if np.all(hist_tr[0] != 0) & np.all(hist_val[0] != 0):
split_list.append({'train':train, 'val':val})
def preprocess():
# Preprocess data to use in model
X_train_aux = []
X_test_aux = []
y_train_aux = []
y_test_aux = []
# For each data split
split = split_list[5]
# Remove padded rows
split_train_no_pad = np.setdiff1d(split['train'], padded_rows)
# Select training and validation data from current split
X_tr = X_aug[split_train_no_pad, :]
X_v = X_aug[split['val'], :]
y_tr = y[split_train_no_pad]
y_v = y[split['val']]
# Select well labels for validation data
well_v = well[split['val']]
# Feature normalization
scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
X_tr = scaler.transform(X_tr)
X_v = scaler.transform(X_v)
X_train_aux.append( X_tr )
X_test_aux.append( X_v )
y_train_aux.append( y_tr )
y_test_aux.append ( y_v )
X_train = np.concatenate( X_train_aux )
X_test = np.concatenate ( X_test_aux )
y_train = np.concatenate ( y_train_aux )
y_test = np.concatenate ( y_test_aux )
return X_train , X_test , y_train , y_test
"""
Explanation: We proceed to run Paolo Bestagini's routine to include a small window of values to account for the spatial component in the log analysis, as well as the gradient information with respect to depth. This will be our prepared training dataset.
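As a quick check of the resulting dimensionality: with the 7 base features and N_neig=1, each augmented row carries 7*(2*1+1) = 21 windowed values plus 7 depth gradients, i.e. 28 features in total.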
End of explanation
"""
from tpot import TPOTClassifier
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = preprocess()
tpot = TPOTClassifier(generations=5, population_size=20,
verbosity=2,max_eval_time_mins=20,
max_time_mins=100,scoring='f1_micro',
random_state = 17)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
tpot.export('FinalPipeline_LM_mean_per_facies.py')
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
# Train and test a classifier
def train_and_test(X_tr, y_tr, X_v, well_v):
# Feature normalization
scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
X_tr = scaler.transform(X_tr)
X_v = scaler.transform(X_v)
# Train classifier
#clf = make_pipeline(make_union(VotingClassifier([("est", ExtraTreesClassifier(criterion="gini", max_features=1.0, n_estimators=500))]), FunctionTransformer(lambda X: X)), XGBClassifier(learning_rate=0.73, max_depth=10, min_child_weight=10, n_estimators=500, subsample=0.27))
#clf = make_pipeline( KNeighborsClassifier(n_neighbors=5, weights="distance") )
#clf = make_pipeline(MaxAbsScaler(),make_union(VotingClassifier([("est", RandomForestClassifier(n_estimators=500))]), FunctionTransformer(lambda X: X)),ExtraTreesClassifier(criterion="entropy", max_features=0.0001, n_estimators=500))
# * clf = make_pipeline( make_union(VotingClassifier([("est", BernoulliNB(alpha=60.0, binarize=0.26, fit_prior=True))]), FunctionTransformer(lambda X: X)),RandomForestClassifier(n_estimators=500))
clf = make_pipeline ( XGBClassifier(learning_rate=0.12, max_depth=3, min_child_weight=10, n_estimators=150, seed = 17, colsample_bytree = 0.9) )
clf.fit(X_tr, y_tr)
# Test classifier
y_v_hat = clf.predict(X_v)
# Clean isolated facies for each well
for w in np.unique(well_v):
y_v_hat[well_v==w] = medfilt(y_v_hat[well_v==w], kernel_size=5)
return y_v_hat
"""
Explanation: Data Analysis
In this section we will run a Cross Validation routine
End of explanation
"""
#Load testing data
test_data = pd.read_csv('../validation_data_nofacies.csv')
# Prepare training data
X_tr = X
y_tr = y
# Augment features
X_tr, padded_rows = augment_features(X_tr, well, depth)
# Removed padded rows
X_tr = np.delete(X_tr, padded_rows, axis=0)
y_tr = np.delete(y_tr, padded_rows, axis=0)
# Prepare test data
well_ts = test_data['Well Name'].values
depth_ts = test_data['Depth'].values
X_ts = test_data[feature_names].values
# Augment features
X_ts, padded_rows = augment_features(X_ts, well_ts, depth_ts)
# Predict test labels
y_ts_hat = train_and_test(X_tr, y_tr, X_ts, well_ts)
# Save predicted labels
test_data['Facies'] = y_ts_hat
test_data.to_csv('Prediction_XX_Final.csv')
"""
Explanation: Prediction
End of explanation
"""
|
Leguark/pynoddy
|
docs/notebooks/3-Events.ipynb
|
gpl-2.0
|
from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
%matplotlib inline
"""
Explanation: Geological events in pynoddy: organisation and adaptation
Here we describe how the single geological events of a Noddy history are organised within pynoddy. We then look in more detail at how aspects of events can be adapted and how their effect can be evaluated.
End of explanation
"""
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
import pynoddy
import pynoddy.history
import pynoddy.events
import pynoddy.output
reload(pynoddy)
# Change to sandbox directory to store results
os.chdir(os.path.join(repo_path, 'sandbox'))
# Path to example directory in this repository
example_directory = os.path.join(repo_path,'examples')
# Compute noddy model for history file
history = 'simple_two_faults.his'
history_ori = os.path.join(example_directory, history)
output_name = 'noddy_out'
reload(pynoddy.history)
reload(pynoddy.events)
H1 = pynoddy.history.NoddyHistory(history_ori)
# Before we do anything else, let's actually define the cube size here to
# adjust the resolution for all subsequent examples
H1.change_cube_size(100)
# compute model - note: not strictly required, here just to ensure changed cube size
H1.write_history(history)
pynoddy.compute_model(history, output_name)
"""
Explanation: Loading events from a Noddy history
In the current set-up of pynoddy, we always start with a pre-defined Noddy history loaded from a file, and then change aspects of the history and the single events. The first step is therefore to load the history file and to extract the single geological events. This is done automatically as default when loading the history file into the History object:
End of explanation
"""
H1.events
"""
Explanation: Events are stored in the object dictionary "events" (who would have thought), where the key corresponds to the position in the timeline:
End of explanation
"""
H1.events[3].properties
"""
Explanation: We can see here that three events are defined in the history. Events are organised as objects themselves, containing all the relevant properties and information about the events. For example, the second fault event is defined as:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# get the original dip of the fault
dip_ori = H1.events[3].properties['Dip']
# add 10 degrees to dip
add_dip = -10
dip_new = dip_ori + add_dip
# and assign back to properties dictionary:
H1.events[3].properties['Dip'] = dip_new
# H1.events[2].properties['Dip'] = dip_new1
new_history = "dip_changed"
new_output = "dip_changed_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
# load output from both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', position=0, ax = ax1, colorbar=False, title="Dip = %.0f" % dip_ori, savefig=True, fig_filename ="tmp.eps")
NO2.plot_section('y', position=1, ax = ax2, colorbar=False, title="Dip = %.0f" % dip_new)
plt.show()
"""
Explanation: Changing aspects of geological events
So what we now want to do, of course, is to change aspects of these events and to evaluate the effect on the resulting geological model. Parameters can directly be updated in the properties dictionary:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# The names of the two fault events defined in the history file are:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: Changing the order of geological events
The geological history is parameterised as single events in a timeline. Changing the order of events can be performed with two basic methods:
Swapping two events with a simple command
Adjusting the entire timeline with a complete remapping of events
The first method is probably the most useful for testing how a simple change in the order of events will affect the final geological model. We will use it here with our example to test how the model would change if the timing of the faults were swapped.
The method to swap two geological events is defined on the level of the history object:
End of explanation
"""
# Now: swap the events:
H1.swap_events(2,3)
# And let's check if this is correctly relfected in the events order now:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: We now swap the position of two events in the kinematic history. For this purpose, a high-level function can directly be used:
End of explanation
"""
new_history = "faults_changed_order.his"
new_output = "faults_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
reload(pynoddy.output)
# Load and compare both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', ax = ax1, colorbar=False, title="Model 1")
NO2.plot_section('y', ax = ax2, colorbar=False, title="Model 2")
plt.show()
"""
Explanation: Now let's create a new history file and evaluate the effect of the changed order in a cross section view:
End of explanation
"""
diff = (NO2.block - NO1.block)
"""
Explanation: Determining the stratigraphic difference between two models
Here is another quick example of a possible application of pynoddy: evaluating aspects that are not easily possible with, for example, the GUI version of Noddy itself. In the last example with the changed order of the faults, we might be interested in determining where in space this change had an effect. We can test this quite simply using the NoddyOutput objects.
The geology data is stored in the NoddyOutput.block attribute. To evaluate the difference between two models, we can therefore simply compute:
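As a quick follow-up (a sketch, not part of the original workflow), the fraction of voxels that changed between the two models is simply:
changed_fraction = (diff != 0).mean()
print changed_fraction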
End of explanation
"""
fig = plt.figure(figsize = (5,3))
ax = fig.add_subplot(111)
ax.imshow(diff[:,10,:].transpose(), interpolation='nearest',
cmap = "RdBu", origin = 'lower left')
"""
Explanation: And create a simple visualisation of the difference in a slice plot with:
End of explanation
"""
NO1.export_to_vtk(vtk_filename = "model_diff", data = diff)
"""
Explanation: (Adding a meaningful title and axis labels to the plot is left to the reader as a simple exercise :-) Future versions of pynoddy might provide an automatic implementation for this step...)
Again, we may want to visualise results in 3-D. We can use the export_to_vtk function as before, but now assign the data array to be exported to the calculated difference field:
End of explanation
"""
|
tensorflow/docs
|
site/en/tutorials/keras/overfit_and_underfit.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import regularizers
print(tf.__version__)
!pip install git+https://github.com/tensorflow/docs
import tensorflow_docs as tfdocs
import tensorflow_docs.modeling
import tensorflow_docs.plots
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import pathlib
import shutil
import tempfile
logdir = pathlib.Path(tempfile.mkdtemp())/"tensorboard_logs"
shutil.rmtree(logdir, ignore_errors=True)
"""
Explanation: Overfit and underfit
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/overfit_and_underfit"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/overfit_and_underfit.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
As always, the code in this example will use the tf.keras API, which you can learn more about in the TensorFlow Keras guide.
In both of the previous examples—classifying text and predicting fuel efficiency—the accuracy of models on the validation data would peak after training for a number of epochs and then stagnate or start decreasing.
In other words, your model would overfit to the training data. Learning how to deal with overfitting is important. Although it's often possible to achieve high accuracy on the training set, what you really want is to develop models that generalize well to a testing set (or data they haven't seen before).
The opposite of overfitting is underfitting. Underfitting occurs when there is still room for improvement on the train data. This can happen for a number of reasons: If the model is not powerful enough, is over-regularized, or has simply not been trained long enough. This means the network has not learned the relevant patterns in the training data.
If you train for too long though, the model will start to overfit and learn patterns from the training data that don't generalize to the test data. You need to strike a balance. Understanding how to train for an appropriate number of epochs as you'll explore below is a useful skill.
To prevent overfitting, the best solution is to use more complete training data. The dataset should cover the full range of inputs that the model is expected to handle. Additional data may only be useful if it covers new and interesting cases.
A model trained on more complete data will naturally generalize better. When that is no longer possible, the next best solution is to use techniques like regularization. These place constraints on the quantity and type of information your model can store. If a network can only afford to memorize a small number of patterns, the optimization process will force it to focus on the most prominent patterns, which have a better chance of generalizing well.
In this notebook, you'll explore several common regularization techniques, and use them to improve on a classification model.
Setup
Before getting started, import the necessary packages:
End of explanation
"""
gz = tf.keras.utils.get_file('HIGGS.csv.gz', 'http://mlphysics.ics.uci.edu/data/higgs/HIGGS.csv.gz')
FEATURES = 28
"""
Explanation: The Higgs dataset
The goal of this tutorial is not to do particle physics, so don't dwell on the details of the dataset. It contains 11,000,000 examples, each with 28 features, and a binary class label.
End of explanation
"""
ds = tf.data.experimental.CsvDataset(gz,[float(),]*(FEATURES+1), compression_type="GZIP")
"""
Explanation: The tf.data.experimental.CsvDataset class can be used to read csv records directly from a gzip file with no intermediate decompression step.
End of explanation
"""
def pack_row(*row):
label = row[0]
features = tf.stack(row[1:],1)
return features, label
"""
Explanation: That csv reader class returns a list of scalars for each record. The following function repacks that list of scalars into a (feature_vector, label) pair.
End of explanation
"""
packed_ds = ds.batch(10000).map(pack_row).unbatch()
"""
Explanation: TensorFlow is most efficient when operating on large batches of data.
So, instead of repacking each row individually make a new tf.data.Dataset that takes batches of 10,000 examples, applies the pack_row function to each batch, and then splits the batches back up into individual records:
End of explanation
"""
for features,label in packed_ds.batch(1000).take(1):
print(features[0])
plt.hist(features.numpy().flatten(), bins = 101)
"""
Explanation: Inspect some of the records from this new packed_ds.
The features are not perfectly normalized, but this is sufficient for this tutorial.
End of explanation
"""
N_VALIDATION = int(1e3)
N_TRAIN = int(1e4)
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 500
STEPS_PER_EPOCH = N_TRAIN//BATCH_SIZE
"""
Explanation: To keep this tutorial relatively short, use just the first 1,000 samples for validation, and the next 10,000 for training:
End of explanation
"""
validate_ds = packed_ds.take(N_VALIDATION).cache()
train_ds = packed_ds.skip(N_VALIDATION).take(N_TRAIN).cache()
train_ds
"""
Explanation: The Dataset.skip and Dataset.take methods make this easy.
At the same time, use the Dataset.cache method to ensure that the loader doesn't need to re-read the data from the file on each epoch:
End of explanation
"""
validate_ds = validate_ds.batch(BATCH_SIZE)
train_ds = train_ds.shuffle(BUFFER_SIZE).repeat().batch(BATCH_SIZE)
"""
Explanation: These datasets return individual examples. Use the Dataset.batch method to create batches of an appropriate size for training. Before batching, also remember to use Dataset.shuffle and Dataset.repeat on the training set.
End of explanation
"""
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
0.001,
decay_steps=STEPS_PER_EPOCH*1000,
decay_rate=1,
staircase=False)
def get_optimizer():
return tf.keras.optimizers.Adam(lr_schedule)
"""
Explanation: Demonstrate overfitting
The simplest way to prevent overfitting is to start with a small model: A model with a small number of learnable parameters (which is determined by the number of layers and the number of units per layer). In deep learning, the number of learnable parameters in a model is often referred to as the model's "capacity".
Intuitively, a model with more parameters will have more "memorization capacity" and therefore will be able to easily learn a perfect dictionary-like mapping between training samples and their targets, a mapping without any generalization power, but this would be useless when making predictions on previously unseen data.
Always keep this in mind: deep learning models tend to be good at fitting to the training data, but the real challenge is generalization, not fitting.
On the other hand, if the network has limited memorization resources, it will not be able to learn the mapping as easily. To minimize its loss, it will have to learn compressed representations that have more predictive power. At the same time, if you make your model too small, it will have difficulty fitting to the training data. There is a balance between "too much capacity" and "not enough capacity".
Unfortunately, there is no magical formula to determine the right size or architecture of your model (in terms of the number of layers, or the right size for each layer). You will have to experiment using a series of different architectures.
To find an appropriate model size, it's best to start with relatively few layers and parameters, then begin increasing the size of the layers or adding new layers until you see diminishing returns on the validation loss.
Start with a simple model using only densely-connected layers (tf.keras.layers.Dense) as a baseline, then create larger models, and compare them.
Training procedure
Many models train better if you gradually reduce the learning rate during training. Use tf.keras.optimizers.schedules to reduce the learning rate over time:
End of explanation
"""
step = np.linspace(0,100000)
lr = lr_schedule(step)
plt.figure(figsize = (8,6))
plt.plot(step/STEPS_PER_EPOCH, lr)
plt.ylim([0,max(plt.ylim())])
plt.xlabel('Epoch')
_ = plt.ylabel('Learning Rate')
"""
Explanation: The code above sets a tf.keras.optimizers.schedules.InverseTimeDecay to hyperbolically decrease the learning rate to 1/2 of the base rate at 1,000 epochs, 1/3 at 2,000 epochs, and so on.
End of explanation
"""
def get_callbacks(name):
return [
tfdocs.modeling.EpochDots(),
tf.keras.callbacks.EarlyStopping(monitor='val_binary_crossentropy', patience=200),
tf.keras.callbacks.TensorBoard(logdir/name),
]
"""
Explanation: Each model in this tutorial will use the same training configuration. So set these up in a reusable way, starting with the list of callbacks.
The training for this tutorial runs for many short epochs. To reduce the logging noise use the tfdocs.EpochDots which simply prints a . for each epoch, and a full set of metrics every 100 epochs.
Next include tf.keras.callbacks.EarlyStopping to avoid long and unnecessary training times. Note that this callback is set to monitor the val_binary_crossentropy, not the val_loss. This difference will be important later.
Use callbacks.TensorBoard to generate TensorBoard logs for the training.
End of explanation
"""
def compile_and_fit(model, name, optimizer=None, max_epochs=10000):
if optimizer is None:
optimizer = get_optimizer()
model.compile(optimizer=optimizer,
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[
tf.keras.losses.BinaryCrossentropy(
from_logits=True, name='binary_crossentropy'),
'accuracy'])
model.summary()
history = model.fit(
train_ds,
steps_per_epoch = STEPS_PER_EPOCH,
epochs=max_epochs,
validation_data=validate_ds,
callbacks=get_callbacks(name),
verbose=0)
return history
"""
Explanation: Similarly each model will use the same Model.compile and Model.fit settings:
End of explanation
"""
tiny_model = tf.keras.Sequential([
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(1)
])
size_histories = {}
size_histories['Tiny'] = compile_and_fit(tiny_model, 'sizes/Tiny')
"""
Explanation: Tiny model
Start by training a model:
End of explanation
"""
plotter = tfdocs.plots.HistoryPlotter(metric = 'binary_crossentropy', smoothing_std=10)
plotter.plot(size_histories)
plt.ylim([0.5, 0.7])
"""
Explanation: Now check how the model did:
End of explanation
"""
small_model = tf.keras.Sequential([
# `input_shape` is only required here so that `.summary` works.
layers.Dense(16, activation='elu', input_shape=(FEATURES,)),
layers.Dense(16, activation='elu'),
layers.Dense(1)
])
size_histories['Small'] = compile_and_fit(small_model, 'sizes/Small')
"""
Explanation: Small model
To check if you can beat the performance of the small model, progressively train some larger models.
Try two hidden layers with 16 units each:
End of explanation
"""
medium_model = tf.keras.Sequential([
layers.Dense(64, activation='elu', input_shape=(FEATURES,)),
layers.Dense(64, activation='elu'),
layers.Dense(64, activation='elu'),
layers.Dense(1)
])
"""
Explanation: Medium model
Now try three hidden layers with 64 units each:
End of explanation
"""
size_histories['Medium'] = compile_and_fit(medium_model, "sizes/Medium")
"""
Explanation: And train the model using the same data:
End of explanation
"""
large_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(512, activation='elu'),
layers.Dense(1)
])
"""
Explanation: Large model
As an exercise, you can create an even larger model and check how quickly it begins overfitting. Next, add to this benchmark a network that has much more capacity, far more than the problem would warrant:
End of explanation
"""
size_histories['large'] = compile_and_fit(large_model, "sizes/large")
"""
Explanation: And, again, train the model using the same data:
End of explanation
"""
plotter.plot(size_histories)
a = plt.xscale('log')
plt.xlim([5, max(plt.xlim())])
plt.ylim([0.5, 0.7])
plt.xlabel("Epochs [Log Scale]")
"""
Explanation: Plot the training and validation losses
The solid lines show the training loss, and the dashed lines show the validation loss (remember: a lower validation loss indicates a better model).
While building a larger model gives it more power, if this power is not constrained somehow it can easily overfit to the training set.
In this example, typically, only the "Tiny" model manages to avoid overfitting altogether, and each of the larger models overfits the data more quickly. This becomes so severe for the "large" model that you need to switch the plot to a log-scale to really figure out what's happening.
This is apparent if you plot and compare the validation metrics to the training metrics.
It's normal for there to be a small difference.
If both metrics are moving in the same direction, everything is fine.
If the validation metric begins to stagnate while the training metric continues to improve, you are probably close to overfitting.
If the validation metric is going in the wrong direction, the model is clearly overfitting.
End of explanation
"""
#docs_infra: no_execute
# Load the TensorBoard notebook extension
%load_ext tensorboard
# Open an embedded TensorBoard viewer
%tensorboard --logdir {logdir}/sizes
"""
Explanation: Note: All the above training runs used the callbacks.EarlyStopping to end the training once it was clear the model was not making progress.
View in TensorBoard
These models all wrote TensorBoard logs during training.
Open an embedded TensorBoard viewer inside a notebook:
End of explanation
"""
display.IFrame(
src="https://tensorboard.dev/experiment/vW7jmmF9TmKmy3rbheMQpw/#scalars&_smoothingWeight=0.97",
width="100%", height="800px")
"""
Explanation: You can view the results of a previous run of this notebook on TensorBoard.dev.
TensorBoard.dev is a managed experience for hosting, tracking, and sharing ML experiments with everyone.
It's also included in an <iframe> for convenience:
End of explanation
"""
shutil.rmtree(logdir/'regularizers/Tiny', ignore_errors=True)
shutil.copytree(logdir/'sizes/Tiny', logdir/'regularizers/Tiny')
regularizer_histories = {}
regularizer_histories['Tiny'] = size_histories['Tiny']
"""
Explanation: If you want to share TensorBoard results you can upload the logs to TensorBoard.dev by copying the following into a code-cell.
Note: This step requires a Google account.
!tensorboard dev upload --logdir {logdir}/sizes
Caution: This command does not terminate. It's designed to continuously upload the results of long-running experiments. Once your data is uploaded you need to stop it using the "interrupt execution" option in your notebook tool.
Strategies to prevent overfitting
Before getting into the content of this section copy the training logs from the "Tiny" model above, to use as a baseline for comparison.
End of explanation
"""
l2_model = tf.keras.Sequential([
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001),
input_shape=(FEATURES,)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(512, activation='elu',
kernel_regularizer=regularizers.l2(0.001)),
layers.Dense(1)
])
regularizer_histories['l2'] = compile_and_fit(l2_model, "regularizers/l2")
"""
Explanation: Add weight regularization
You may be familiar with Occam's Razor principle: given two explanations for something, the explanation most likely to be correct is the "simplest" one, the one that makes the least amount of assumptions. This also applies to the models learned by neural networks: given some training data and a network architecture, there are multiple sets of weights values (multiple models) that could explain the data, and simpler models are less likely to overfit than complex ones.
A "simple model" in this context is a model where the distribution of parameter values has less entropy (or a model with fewer parameters altogether, as demonstrated in the section above). Thus a common way to mitigate overfitting is to put constraints on the complexity of a network by forcing its weights only to take small values, which makes the distribution of weight values more "regular". This is called "weight regularization", and it is done by adding to the loss function of the network a cost associated with having large weights. This cost comes in two flavors:
L1 regularization, where the cost added is proportional to the absolute value of the weights coefficients (i.e. to what is called the "L1 norm" of the weights).
L2 regularization, where the cost added is proportional to the square of the value of the weights coefficients (i.e. to what is called the squared "L2 norm" of the weights). L2 regularization is also called weight decay in the context of neural networks. Don't let the different name confuse you: weight decay is mathematically the exact same as L2 regularization.
L1 regularization pushes weights towards exactly zero, encouraging a sparse model. L2 regularization will penalize the weights parameters without making them sparse since the penalty goes to zero for small weights—one reason why L2 is more common.
In tf.keras, weight regularization is added by passing weight regularizer instances to layers as keyword arguments. Add L2 weight regularization:
End of explanation
"""
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
"""
Explanation: l2(0.001) means that every coefficient in the weight matrix of the layer will add 0.001 * weight_coefficient_value**2 to the total loss of the network.
That is why we're monitoring the binary_crossentropy directly: it doesn't have this regularization component mixed in.
So, that same "Large" model with an L2 regularization penalty performs much better:
End of explanation
"""
result = l2_model(features)
regularization_loss=tf.add_n(l2_model.losses)
"""
Explanation: As demonstrated in the diagram above, the "L2" regularized model is now much more competitive with the "Tiny" model. This "L2" model is also much more resistant to overfitting than the "Large" model it was based on despite having the same number of parameters.
More info
There are two important things to note about this sort of regularization:
If you are writing your own training loop, then you need to be sure to ask the model for its regularization losses.
End of explanation
"""
dropout_model = tf.keras.Sequential([
layers.Dense(512, activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['dropout'] = compile_and_fit(dropout_model, "regularizers/dropout")
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
"""
Explanation: This implementation works by adding the weight penalties to the model's loss, and then applying a standard optimization procedure after that.
There is a second approach that instead only runs the optimizer on the raw loss, and then while applying the calculated step the optimizer also applies some weight decay. This "decoupled weight decay" is used in optimizers like tf.keras.optimizers.Ftrl and tfa.optimizers.AdamW.
Add dropout
Dropout is one of the most effective and most commonly used regularization techniques for neural networks, developed by Hinton and his students at the University of Toronto.
The intuitive explanation for dropout is that because individual nodes in the network cannot rely on the output of the others, each node must output features that are useful on their own.
Dropout, applied to a layer, consists of randomly "dropping out" (i.e. set to zero) a number of output features of the layer during training. For example, a given layer would normally have returned a vector [0.2, 0.5, 1.3, 0.8, 1.1] for a given input sample during training; after applying dropout, this vector will have a few zero entries distributed at random, e.g. [0, 0.5, 1.3, 0, 1.1].
The "dropout rate" is the fraction of the features that are being zeroed-out; it is usually set between 0.2 and 0.5. At test time, no units are dropped out, and instead the layer's output values are scaled down by a factor equal to the dropout rate, so as to balance for the fact that more units are active than at training time.
In Keras, you can introduce dropout in a network via the tf.keras.layers.Dropout layer, which is applied to the output of the layer right before it.
Add two dropout layers to your network to check how well they do at reducing overfitting:
End of explanation
"""
combined_model = tf.keras.Sequential([
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu', input_shape=(FEATURES,)),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(512, kernel_regularizer=regularizers.l2(0.0001),
activation='elu'),
layers.Dropout(0.5),
layers.Dense(1)
])
regularizer_histories['combined'] = compile_and_fit(combined_model, "regularizers/combined")
plotter.plot(regularizer_histories)
plt.ylim([0.5, 0.7])
"""
Explanation: It's clear from this plot that both of these regularization approaches improve the behavior of the "Large" model. But this still doesn't beat even the "Tiny" baseline.
Next try them both, together, and see if that does better.
Combined L2 + dropout
End of explanation
"""
display.IFrame(
src="https://tensorboard.dev/experiment/fGInKDo8TXes1z7HQku9mw/#scalars&_smoothingWeight=0.97",
width = "100%",
height="800px")
"""
Explanation: This model with the "Combined" regularization is obviously the best one so far.
View in TensorBoard
These models also recorded TensorBoard logs.
To open an embedded tensorboard viewer inside a notebook, copy the following into a code-cell:
%tensorboard --logdir {logdir}/regularizers
You can view the results of a previous run of this notebook on TensorBoard.dev.
It's also included in an <iframe> for convenience:
End of explanation
"""
|
gaargly/gaargly.github.io
|
Lira_Assignment_distribution.ipynb
|
mit
|
#codes here for a)
import math
import pandas as pd
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def demo1():
mu, sigma = 0, 0.1
sampleNo = 1000
s = np.random.normal(mu, sigma, sampleNo)
plt.hist(s, bins=100, density=True)
plt.show()
demo1()
"""
Explanation: <a href="https://colab.research.google.com/github/gaargly/gaargly.github.io/blob/master/Lira_Assignment_distribution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
By using distribution=np.random.name_distribution([],[]), write the name of a distribution of your choice in place of name_distribution and fill in the brackets with parameters of your choice. Then please, a) draw the histogram and interpret it, b) draw the Q-Q plot and interpret it.
End of explanation
"""
#codes here
def demo2():
mu, sigma = 9, 10
sampleNo = 50
s = np.random.normal(mu, sigma, sampleNo)
plt.hist(s, bins=100, density=True)
plt.show()
demo2()
def demo3():
mu, sigma = 9, 10
sampleNo = 1000
s = np.random.normal(mu, sigma, sampleNo)
plt.hist(s, bins=100, density=True)
plt.show()
demo3()
"""
Explanation: a) We see from the above histogram that the distribution is approximately normal when we draw a sample of 1000 values from a normal distribution.
b) We also see from the Q-Q plot that the distribution is approximately normal, since the sample quantiles lie close to the theoretical quantiles.
In this question, you test whether the central limit theorem works. You generate 1000 variables with two normal distributions. You can determine the mean and standard deviation of these variables yourself. All you have to do is generate the first variable 50 times and average it each time. Generate the second variable 1000 times and average this variable each time. Then plot the histogram of the averages of the two variables. Which of the variables has a mean distribution closer to the normal distribution? Do you think the Central Limit Theorem seems to have worked?
End of explanation
"""
|
adfriedm/Geometric-K-Server-Experiments
|
experiments.ipynb
|
mit
|
# Load modules
import sys
from __future__ import print_function
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
import pandas as pd
from pandas import DataFrame
import time
import random
from pqt import PQTDecomposition
from splice import splice_alg
from asplice import asplice_alg
from plg import plg_alg
"""
Explanation: Stacker-Crane Experiments
The Euclidean Stacker-Crane problem (ESCP) is a generalization of the Euclidean Travelling Salesman Problem. In the ESCP we are given pickup-delivery pairs and aim to choose delivery-to-pickup connections that form a minimal-cost tour. The SPLICE algorithm was proven to almost surely provide an asymptotically optimal solution when pickups and deliveries are each sampled from their respective distributions.
The SPLICE algorithm, however, relies on a Euclidean bipartite matching between all pairs, an O(n^3) operation (though complex approximations exist). Here we test 2 algorithms that we propose, PLG and ASPLICE, each of which relies on a probabilistic quad tree: a quad tree that terminates decomposition once (number of points in cell)/(total points) < p_hat for every cell. Intuitively this means that most delivery points are close to pickups.
PLG
In PLG we simply follow a pickup-delivery pair: if the delivery falls in a cell with an unmatched pickup, we link to that pickup; otherwise we connect to any unmatched pickup anywhere.
This is an almost-surely asymptotically near-optimal algorithm when the pickup and delivery distributions are identical, meaning that we can choose p_hat so that as the number of points -> infinity, the ratio of PLG cost to optimal cost approaches some (1+eps), where eps may be made as small as desired. This algorithm runs in linear time. For non-identical distributions, this is still a constant-factor approximation, with a factor that depends on how similar the distributions are (in terms of both the Wasserstein and total variation distances).
ASPLICE
The ASPLICE algorithm is functionally similar to PLG, except that the connection between cells is not random. It first makes all possible delivery-to-pickup connections within cells, then assigns the excess deliveries and pickups by solving the Transportation Problem, and finally merges the resulting subtours. ASPLICE is guaranteed to outperform PLG in probability and inherits the analysis of SPLICE, which it approximates. The primary difference is that solving the EBMP on all points is much more expensive than solving the Transportation Problem on only the excess points in each cell.
The SPLICE algorithm is impractically slow for over 250 pairs (over a minute to calculate), whereas ASPLICE, even using an LP solver (which is far from a fast approach), can handle over 1000 pairs with ease.
End of explanation
"""
def run_experiments(gen_pd_edges, n_pairs_li, n_reps, p_hat_li, verbose=False, **kwargs):
"""
Parameters:
n_pairs_li - a list of the number of pairs to generate
n_reps - the number of repetitions of experiment with
that number of pairs
gen_pd_edges - function the generates n pairs
include_pqt_time - whether or not the time to compute the
pqt should be included
verbose - whether or not to print the repetitions to stdout
"""
data = defaultdict(list)
def add_datum_kv((k,v)):
data[k].append(v)
return (k,v)
# Run experiment
for n_pairs in n_pairs_li:
for rep in xrange(n_reps):
if verbose:
print("Number of pairs: {} Rep: {} at ({})"\
.format(n_pairs,rep, time.strftime(
"%H:%M %S", time.gmtime())
)
)
sys.stdout.flush()
# Generate pairs
pd_edges = gen_pd_edges(n_pairs)
time_stamp = int(time.time()*1000)
# Run SPLICE
start_time = time.clock()
_, splice_cost = splice_alg(pd_edges)
splice_runtime = time.clock() - start_time
splice_datum = {'alg': splice_alg.__name__,
'timestamp': time_stamp,
'n_pairs': n_pairs,
'cost': splice_cost,
'p_hat': float('inf'),
'alg_runtime': splice_runtime,
'pqt_runtime': None,
'rep': rep}
# Add datum
map(add_datum_kv, splice_datum.iteritems())
for p_hat in p_hat_li:
# Generate PQT
start_time = time.clock()
pqt = PQTDecomposition().from_points(pd_edges.keys(),
p_hat=p_hat)
pqt_runtime = time.clock() - start_time
# Run ASPLICE
start_time = time.clock()
_, asplice_cost = asplice_alg(pd_edges, pqt=pqt)
asplice_runtime = time.clock() - start_time
# Add datum
asplice_datum = {'alg': asplice_alg.__name__,
'timestamp': time_stamp,
'n_pairs': n_pairs,
'cost': asplice_cost,
'p_hat': p_hat,
'alg_runtime': asplice_runtime,
'pqt_runtime': pqt_runtime,
'rep': rep}
map(add_datum_kv, asplice_datum.iteritems())
# Run PLG
start_time = time.clock()
_, plg_cost = plg_alg(pd_edges, pqt=pqt)
plg_runtime = time.clock() - start_time
# Add datum
plg_datum = {'alg': plg_alg.__name__,
'timestamp': time_stamp,
'n_pairs': n_pairs,
'cost': plg_cost,
'p_hat': p_hat,
'alg_runtime': plg_runtime,
'pqt_runtime': pqt_runtime,
'rep': rep}
map(add_datum_kv, plg_datum.iteritems())
return DataFrame(data)
"""
Explanation: Define experiment
Here we run PLG, SPLICE and ASPLICE over pairs of points generated function. This creates a DataFrame with entries containing the results. The timestamp corresponds to creation of the pairs, so it may be used to compare results on the same dataset instance.
End of explanation
"""
def gen_pd_edges(gen_p_fn, gen_d_fn, n_pairs=50):
    # Generates a dict of random pickup-delivery pairs from the two distributions.
    # Keys must be hashable, so gen_p_fn cannot return a list or np.array.
return {gen_p_fn(): gen_d_fn() \
for i in xrange(n_pairs)}
def gen_pt_gmm(means, covs, mix_weights):
while True:
# Which Gaussian to sample from
ridx = np.random.choice(len(mix_weights), p=mix_weights)
# Sample point
pt = np.random.multivariate_normal(means[ridx], covs[ridx])
# Only accept point if it lies in the unit square
if 0.<=pt[0]<=1. and 0.<=pt[1]<=1.:
return tuple(pt)
else:
continue
# Define GMMs
gmm1_params = { 'means': [[0.5, 0.5],
[0.8,0.1]],
'covs': [[[0.01, 0.], [0., 0.01]],
[[0.015, 0.], [0., 0.02]]],
'mix_weights': [0.6, 0.4]
}
gmm2_params = { 'means': [[0.51, 0.55],
[0.78, 0.10]],
'covs': [[[0.01, 0.00], [0.00, 0.01]],
[[0.015, 0.00], [0.00, 0.02]]],
'mix_weights': [0.6, 0.4]
}
gmm3_params = { 'means': [[0.21, 0.30],
[0.78, 0.84],
[0.78, 0.10]],
'covs': [[[0.01, 0.00], [0.00, 0.01]],
[[0.015, 0.00], [0.00, 0.02]],
[[0.015, 0.00], [0.00, 0.02]]],
'mix_weights': [0.5, 0.3, 0.2]
}
# Setup gen_pd functions
def gen_pt_unif():
return (random.random(), random.random())
def gen_pd_unif(n_pairs):
return gen_pd_edges(gen_pt_unif, gen_pt_unif, n_pairs)
def gen_pt_gmm1():
return gen_pt_gmm(**gmm1_params)
def gen_pt_gmm2():
return gen_pt_gmm(**gmm2_params)
def gen_pt_gmm3():
return gen_pt_gmm(**gmm3_params)
def gen_pd_gmm_close(n_pairs):
return gen_pd_edges(gen_pt_gmm1, gen_pt_gmm2, n_pairs)
def gen_pd_gmm_far(n_pairs):
return gen_pd_edges(gen_pt_gmm1, gen_pt_gmm3, n_pairs)
"""
Explanation: Functions to generate pairs
End of explanation
"""
# Setup parameters
experiment1 = {
'n_pairs_li': list(xrange(10, 100, 3)) \
+ list(xrange(100, 150, 5)) \
+ list(xrange(150, 200, 10)),
'p_hat_li': [0.01, 0.1],
'n_reps': 3,
'gen_pd_edges': gen_pd_unif,
'save_path': "results/comparison/uniform/",
'name': "uni",
'verbose': True
}
experiment2 = {
'n_pairs_li': list(xrange(10, 100, 3)) \
+ list(xrange(100, 150, 5)) \
+ list(xrange(150, 200, 10)),
'p_hat_li': [0.01, 0.1],
'n_reps': 3,
'gen_pd_edges': gen_pd_gmm_close,
'save_path': "results/comparison/gmm_close/",
'name': "close",
'verbose': True
}
experiment3 = {
'n_pairs_li': list(xrange(10, 100, 3)) \
+ list(xrange(100, 150, 5)) \
+ list(xrange(150, 200, 10)),
'p_hat_li': [0.01, 0.1],
'n_reps': 3,
'gen_pd_edges': gen_pd_gmm_far,
'save_path': "results/comparison/gmm_far/",
'name': "far",
'verbose': True
}
# Choose the experiment
experiment = experiment1
"""
Explanation: Setup and Run Experiment
End of explanation
"""
save_path = "{}scatter_{}.pdf" \
.format(experiment['save_path'],
experiment['name'])
fig, ax = plt.subplots()
pds = experiment['gen_pd_edges'](1000)
ax.scatter(*zip(*pds.keys()), color='r', edgecolors='none', s=2)
ax.scatter(*zip(*pds.values()), color='b', edgecolors='none', s=2)
ax.set_xlim([0.,1.])
ax.set_ylim([0.,1.])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig(save_path, bbox_inches='tight')
# Run the experiment
df = run_experiments(**experiment)
# Show the last 6 rows of results
df.tail(6)
"""
Explanation: Show sample scatter plot
End of explanation
"""
save_path = "{}avg_cost_{}.pdf" \
.format(experiment['save_path'],
experiment['name'])
with plt.style.context('seaborn-white'):
mean_df = df.groupby(['alg', 'p_hat', 'n_pairs']) \
[['cost','alg_runtime','pqt_runtime']] \
.mean() \
.add_prefix('mean_') \
.reset_index()
fig, ax = plt.subplots()
#ax.set_yscale('log')
group_plts = mean_df.groupby(['alg', 'p_hat'])
for i,(name, group) in enumerate(group_plts):
alg,p_hat = name
if alg == splice_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_cost'],
label=r"SPLICE")#,
#color=cmap(i / float(len(group_plts))))
elif alg == asplice_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_cost'],
label=r"ASPLICE $\hat{{p}}={:.3}$".format(p_hat))#,
#color=cmap(i / float(len(group_plts))))
elif alg == plg_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_cost'],
label=r"PLG $\hat{{p}}={:.3}$".format(p_hat))#,
#color=cmap(i / float(len(group_plts))))
ax.set_xlabel("Number of pd Pairs", fontsize=15)
ax.set_ylabel("Cost", fontsize=15)
ax.set_title('Algorithm Cost vs Number of Pairs')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
plt.savefig(save_path, bbox_inches='tight')
"""
Explanation: Comparing Average Cost vs Number of Pairs
End of explanation
"""
save_path = "{}avg_runtime_{}.pdf" \
.format(experiment['save_path'],
experiment['name'])
with plt.style.context('seaborn-white'):
fig, ax = plt.subplots()
ax.set_yscale('log')
group_plts = mean_df.groupby(['alg', 'p_hat'])
cmap = mpl.cm.autumn
for i,(name, group) in enumerate(group_plts):
alg,p_hat = name
if alg == splice_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_alg_runtime'],
label=r"SPLICE")
#color=cmap(i / float(len(group_plts))))
elif alg == asplice_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_alg_runtime'],
label=r"ASPLICE $\hat{{p}}={:.3}$".format(p_hat),
linestyle="-")
#color=cmap(i / float(len(group_plts))))
elif alg == plg_alg.__name__:
plt.plot(group['n_pairs'],
group['mean_alg_runtime'],
label=r"PLG $\hat{{p}}={:.3}$".format(p_hat),
linestyle="-")
#color=cmap(i / float(len(group_plts))))
ax.set_xlabel("Number of pd Pairs", fontsize=15)
ax.set_ylabel("Time (s)", fontsize=15)
ax.set_title('Algorithm Runtime vs Number of Pairs')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
plt.savefig(save_path, bbox_inches='tight')
"""
Explanation: Comparing Average Runtimes
We compare the average runtime for different parameters.
End of explanation
"""
grouped = df.groupby(['alg','p_hat'])
# Extract splice costs
splice_costs = df[df['alg'] == splice_alg.__name__].cost
splice_costs[:5]
save_path = "{}avg_ratios_{}.pdf" \
.format(experiment['save_path'],
experiment['name'])
with plt.style.context('seaborn-white'):
fig, ax = plt.subplots()
for i,(key, group) in enumerate(grouped):
alg, p_hat = key
# Compute ratio of alg cost to splice_cost
cost_ratios = group.cost.values / splice_costs.values
# Compute the mean of ratios for each rep
mean_cost_ratios = np.mean(cost_ratios.reshape(-1, experiment['n_reps']), axis=1)
        if alg == splice_alg.__name__:
            # Skip plotting SPLICE: its cost ratio to itself is always 1
            continue
elif alg == asplice_alg.__name__:
plt.plot(experiment['n_pairs_li'],
mean_cost_ratios,
label=r"ASPLICE $\hat{{p}}={:.3}$".format(p_hat),
linestyle="-")
#,color=cmap(i / float(len(grouped))))
elif alg == plg_alg.__name__:
plt.plot(experiment['n_pairs_li'],
mean_cost_ratios,
label=r"PLG $\hat{{p}}={:.3}$".format(p_hat),
linestyle="-")
#,color=cmap(i / float(len(grouped))))
ax.set_xlabel("Number of pd Pairs", fontsize=15)
ax.set_ylabel("Mean Ratio to SPLICE", fontsize=15)
ax.set_title('Mean Cost Ratio vs Number of Pairs')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fancybox=True, shadow=True)
ax.grid(True)
plt.savefig(save_path, bbox_inches='tight')
"""
Explanation: Compute ratio of Costs
Next we find the ratio of each algorithm's cost to the SPLICE cost. We then calculate the average of these ratios for each number of pairs. In other words, we are approximating E[ ALG cost / SPLICE cost ].
End of explanation
"""
|
buntyke/TRo2017
|
Experiments/Exp6/experiment1.ipynb
|
mit
|
# import the modules
import os
import sys
import GPy
import csv
import numpy as np
import cPickle as pickle
import scipy.stats as stats
import sklearn.metrics as metrics
from matplotlib import pyplot as plt
%matplotlib notebook
"""
Explanation: Experiment 6: TRo Journal
In this experiment, the generalization of cloth models to unseen postures of the mannequin is verified. The evaluation uses RMSE, NRMSE, and Pearson correlation as the metrics. In this notebook, test inference is performed with pre-trained MRD cloth models.
End of explanation
"""
# function to compute reconstruction error
def reconstructionError(model, valData, testData, mKey, kKey, optimizeFlag=False):
nSamplesVal = valData[mKey].shape[0]
nSamplesTest = testData[mKey].shape[0]
nDimIn = valData[kKey].shape[1]
nDimOut = valData[mKey].shape[1]
qDim = model.X.mean.shape[1]
# computing reconstruction error for test1, test2 with variances
predictVal = np.zeros((nSamplesVal,nDimOut))
predictTest = np.zeros((nSamplesTest,nDimOut))
for n in range(nSamplesVal):
yIn = valData[kKey][n,:]
yTrueOut = valData[mKey][n,:]
[xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=False)
yOut = model.predict(xPredict.mean, Yindex=1)
sys.stdout.write('.')
predictVal[n,:] = yOut[0]
sys.stdout.write('\n')
for n in range(nSamplesTest):
yIn = testData[kKey][n,:]
yTrueOut = testData[mKey][n,:]
[xPredict, infX] = model.Y0.infer_newX(yIn[None,:], optimize=optimizeFlag)
yOut = model.predict(xPredict.mean, Yindex=1)
sys.stdout.write('.')
predictTest[n,:] = yOut[0]
sys.stdout.write('\n')
results = {}
valResults = {}
testResults = {}
valResults['pred'] = predictVal
testResults['pred'] = predictTest
valErrors = np.sqrt(metrics.mean_squared_error(valData[mKey],predictVal,multioutput='raw_values'))
testErrors = np.sqrt(metrics.mean_squared_error(testData[mKey],predictTest,multioutput='raw_values'))
valNormErrors = np.divide(np.sqrt(metrics.mean_squared_error(valData[mKey],predictVal,multioutput='raw_values')),
valData[mKey].max(axis=0) - valData[mKey].min(axis=0))
testNormErrors = np.divide(np.sqrt(metrics.mean_squared_error(testData[mKey],predictTest,multioutput='raw_values')),
testData[mKey].max(axis=0) - testData[mKey].min(axis=0))
valCorr = np.zeros((1,nDimOut))
testCorr = np.zeros((1,nDimOut))
    for d in range(nDimOut):
valCorr[0,d],_ = stats.pearsonr(valData[mKey][:,d],predictVal[:,d])
testCorr[0,d],_ = stats.pearsonr(testData[mKey][:,d],predictTest[:,d])
valResults['rmse'] = valErrors
testResults['rmse'] = testErrors
valResults['nrmse'] = valNormErrors
testResults['nrmse'] = testNormErrors
valResults['corr'] = valCorr
testResults['corr'] = testCorr
results['train'] = valResults
results['test'] = testResults
return results
"""
Explanation: Plotting and Analysis Functions
End of explanation
"""
nShr = 4
nPos = 6
names = []
dims = [1,7500,8]
keys = ['Time','Cloud','TopCoord']
for nS in range(nShr):
for nP in range(nPos):
names.append('K1S%dP%dT1' % (nS+1,nP+1))
# create directory for results
dName = '../Results/Exp6'
if not os.path.exists(dName):
os.makedirs(dName)
# load dataset
Data = pickle.load(open('../Data/Data.p','rb'))
# loop over the kinect keys
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'
keys = [kinectKey,mocapKey]
expName = '%s%s' % (kinectExt,mocapExt)
for sInd in range(nShr):
for pInd in range(nPos):
valData = {}
testData = {}
        testInd = sInd*nPos+pInd
        valInd = sInd*nPos+(pInd+1)%nPos
print 'Cycle:%d,%d' % (sInd+1,pInd+1)
print names[valInd],names[testInd]
for key in keys:
valData[key] = Data[names[valInd]][key]
testData[key] = Data[names[testInd]][key]
# load the trained MRD model
mrdModel = pickle.load(open('../Models/Model%d%d.p' % (sInd+1,pInd+1),'rb'))
# apply inference to test and val data
results = reconstructionError(mrdModel,valData,testData,mocapKey,kinectKey,optimizeFlag=True)
# save results to file
pickle.dump(results,open('../Results/Exp6/MRDRes%d%d.p' % (sInd+1,pInd+1),'wb'))
"""
Explanation: Data Loading
End of explanation
"""
|
jpilgram/phys202-2015-work
|
assignments/assignment10/ODEsEx03.ipynb
|
mit
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
"""
Explanation: Ordinary Differential Equations Exercise 3
Imports
End of explanation
"""
g = 9.81 # m/s^2
l = 0.5 # length of pendulum, in meters
tmax = 50. # seconds
t = np.linspace(0, tmax, int(100*tmax))
"""
Explanation: Damped, driven nonlinear pendulum
The equations of motion for a simple pendulum of mass $m$, length $l$ are:
$$
\frac{d^2\theta}{dt^2} = \frac{-g}{\ell}\sin\theta
$$
When a damping and periodic driving force are added the resulting system has much richer and interesting dynamics:
$$
\frac{d^2\theta}{dt^2} = \frac{-g}{\ell}\sin\theta - a \omega - b \sin(\omega_0 t)
$$
In this equation:
$a$ governs the strength of the damping.
$b$ governs the strength of the driving force.
$\omega_0$ is the angular frequency of the driving force.
When $a=0$ and $b=0$, the energy/mass is conserved:
$$E/m =g\ell(1-\cos(\theta)) + \frac{1}{2}\ell^2\omega^2$$
Basic setup
Here are the basic parameters we are going to use for this exercise:
End of explanation
"""
#I worked with James A and Hunter T.
def derivs(y, t, a, b, omega0):
"""Compute the derivatives of the damped, driven pendulum.
Parameters
----------
y : ndarray
The solution vector at the current time t[i]: [theta[i],omega[i]].
t : float
The current time t[i].
a, b, omega0: float
The parameters in the differential equation.
Returns
-------
dy : ndarray
The vector of derviatives at t[i]: [dtheta[i],domega[i]].
"""
# YOUR CODE HERE
#raise NotImplementedError()
theta = y[0]
omega = y[1]
dtheta =omega
dw = -(g/l)*np.sin(theta)-a*omega-b*np.sin(omega0*t)
return [dtheta, dw]
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y):
"""Compute the energy for the state array y.
The state array y can have two forms:
1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.
2. It could be an ndim=2 array where each row is the [theta,omega] at single
time.
Parameters
----------
y : ndarray, list, tuple
A solution vector
Returns
-------
E/m : float (ndim=1) or ndarray (ndim=2)
The energy per mass.
"""
# YOUR CODE HERE
#raise NotImplementedError()
if y.ndim==1:
theta = y[0]
omega = y[1]
if y.ndim==2:
theta = y[:,0]
omega = y[:,1]
E = g*l*(1-np.cos(theta))+0.5*l**2*omega**2
return (E)
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
"""
Explanation: Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
End of explanation
"""
# YOUR CODE HERE
#raise NotImplementedError()
y0 = [np.pi,0]
solution = odeint(derivs, y0, t, args = (0,0,0), atol = 1e-5, rtol = 1e-4)
# YOUR CODE HERE
#raise NotImplementedError()
plt.plot(t,energy(solution), label="$Energy/mass$")
plt.title('Simple Pendulum Engery')
plt.xlabel('time')
plt.ylabel('$Engery/Mass$')
plt.ylim(9.2,10.2);
# YOUR CODE HERE
#raise NotImplementedError()
theta= solution[:,0]
omega = solution[:,1]
plt.plot(t ,theta, label = "$\Theta (t)$")
plt.plot(t, omega, label = "$\omega (t)$")
plt.ylim(-0.5,5)
plt.legend()
plt.title('Simple Pendulum $\Theta (t)$ and $\omega (t)$')
plt.xlabel('Time');
assert True # leave this to grade the two plots and their tuning of atol, rtol.
"""
Explanation: Simple pendulum
Use the above functions to integrate the simple pendulum for the case where it starts at rest pointing vertically upwards. In this case, it should remain at rest with constant energy.
Integrate the equations of motion.
Plot $E/m$ versus time.
Plot $\theta(t)$ and $\omega(t)$ versus time.
Tune the atol and rtol arguments of odeint until $E/m$, $\theta(t)$ and $\omega(t)$ are constant.
Anytime you have a differential equation with a conserved quantity, it is critical to make sure the numerical solutions conserve that quantity as well. This also gives you an opportunity to find other bugs in your code. The default error tolerances (atol and rtol) used by odeint are not sufficiently small for this problem. Start by trying atol=1e-3, rtol=1e-2 and then decrease each by an order of magnitude until your solutions are stable.
End of explanation
"""
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
"""Integrate the damped, driven pendulum and make a phase plot of the solution."""
# YOUR CODE HERE
#raise NotImplementedError()
y0 =[-np.pi+0.1,0]
solution = odeint(derivs, y0, t, args = (a,b,omega0), atol = 1e-5, rtol = 1e-4)
theta=solution[:,0]
omega=solution[:,1]
plt.plot(theta, omega, color="k")
plt.title('Damped and Driven Pendulum Motion')
plt.xlabel('$\Theta (t)$')
plt.ylabel('$\omega (t)$')
plt.xlim(-2*np.pi, 2*np.pi)
plt.ylim(-10,10);
"""
Explanation: Damped pendulum
Write a plot_pendulum function that integrates the damped, driven pendulum differential equation for a particular set of parameters $[a,b,\omega_0]$.
Use the initial conditions $\theta(0)=-\pi + 0.1$ and $\omega=0$.
Decrease your atol and rtol even futher and make sure your solutions have converged.
Make a parametric plot of $[\theta(t),\omega(t)]$ versus time.
Use the plot limits $\theta \in [-2 \pi,2 \pi]$ and $\omega \in [-10,10]$
Label your axes and customize your plot to make it beautiful and effective.
End of explanation
"""
plot_pendulum(0.5, 0.0, 0.0)
"""
Explanation: Here is an example of the output of your plot_pendulum function that should show a decaying spiral.
End of explanation
"""
# YOUR CODE HERE
#raise NotImplementedError()
interact(plot_pendulum, a=(0.0,1.0,0.1), b=(0.0,10.0,0.1), omega0 = (0.0,10.0,0.1));
"""
Explanation: Use interact to explore the plot_pendulum function with:
a: a float slider over the interval $[0.0,1.0]$ with steps of $0.1$.
b: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
omega0: a float slider over the interval $[0.0,10.0]$ with steps of $0.1$.
End of explanation
"""
|
tensorflow/docs
|
site/en/guide/distributed_training.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
"""
Explanation: Distributed training with TensorFlow
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/distributed_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/distributed_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Overview
tf.distribute.Strategy is a TensorFlow API to distribute training across multiple GPUs, multiple machines, or TPUs. Using this API, you can distribute your existing models and training code with minimal code changes.
tf.distribute.Strategy has been designed with these key goals in mind:
Easy to use and support multiple user segments, including researchers, machine learning engineers, etc.
Provide good performance out of the box.
Easy switching between strategies.
You can distribute training using tf.distribute.Strategy with a high-level API like Keras Model.fit, as well as custom training loops (and, in general, any computation using TensorFlow).
In TensorFlow 2.x, you can execute your programs eagerly, or in a graph using tf.function. tf.distribute.Strategy intends to support both these modes of execution, but works best with tf.function. Eager mode is only recommended for debugging purposes and not supported for tf.distribute.TPUStrategy. Although training is the focus of this guide, this API can also be used for distributing evaluation and prediction on different platforms.
You can use tf.distribute.Strategy with very few changes to your code, because the underlying components of TensorFlow have been changed to become strategy-aware. This includes variables, layers, models, optimizers, metrics, summaries, and checkpoints.
In this guide, you will learn about various types of strategies and how you can use them in different situations. To learn how to debug performance issues, check out the Optimize TensorFlow GPU performance guide.
Note: For a deeper understanding of the concepts, watch the deep-dive presentation—Inside TensorFlow: tf.distribute.Strategy. This is especially recommended if you plan to write your own training loop.
Set up TensorFlow
End of explanation
"""
mirrored_strategy = tf.distribute.MirroredStrategy()
"""
Explanation: Types of strategies
tf.distribute.Strategy intends to cover a number of use cases along different axes. Some of these combinations are currently supported and others will be added in the future. Some of these axes are:
Synchronous vs asynchronous training: These are two common ways of distributing training with data parallelism. In sync training, all workers train over different slices of input data in sync, and aggregating gradients at each step. In async training, all workers are independently training over the input data and updating variables asynchronously. Typically sync training is supported via all-reduce and async through parameter server architecture.
Hardware platform: You may want to scale your training onto multiple GPUs on one machine, or multiple machines in a network (with 0 or more GPUs each), or on Cloud TPUs.
In order to support these use cases, TensorFlow has MirroredStrategy, TPUStrategy, MultiWorkerMirroredStrategy, ParameterServerStrategy, CentralStorageStrategy, as well as other strategies available. The next section explains which of these are supported in which scenarios in TensorFlow. Here is a quick overview:
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | CentralStorageStrategy | ParameterServerStrategy |
| :----------------------- | :----------------- | :------------ | :---------------------------- | :----------------------- | :------------------------ |
| Keras Model.fit | Supported | Supported | Supported | Experimental support | Experimental support |
| Custom training loop | Supported | Supported | Supported | Experimental support | Experimental support |
| Estimator API | Limited Support | Not supported | Limited Support | Limited Support | Limited Support |
Note: Experimental support means the APIs are not covered by any compatibilities guarantees.
Warning: Estimator support is limited. Basic training and evaluation are experimental, and advanced features—such as scaffold—are not implemented. You should be using Keras or custom training loops if a use case is not covered. Estimators are not recommended for new code. Estimators run v1.Session-style code which is more difficult to write correctly, and can behave unexpectedly, especially when combined with TF 2 code. Estimators do fall under our compatibility guarantees, but will receive no fixes other than security vulnerabilities. Go to the migration guide for details.
MirroredStrategy
tf.distribute.MirroredStrategy supports synchronous distributed training on multiple GPUs on one machine. It creates one replica per GPU device. Each variable in the model is mirrored across all the replicas. Together, these variables form a single conceptual variable called MirroredVariable. These variables are kept in sync with each other by applying identical updates.
Efficient all-reduce algorithms are used to communicate the variable updates across the devices. All-reduce aggregates tensors across all the devices by adding them up, and makes them available on each device. It’s a fused algorithm that is very efficient and can reduce the overhead of synchronization significantly. There are many all-reduce algorithms and implementations available, depending on the type of communication available between devices. By default, it uses the NVIDIA Collective Communication Library (NCCL) as the all-reduce implementation. You can choose from a few other options or write your own.
Here is the simplest way of creating MirroredStrategy:
End of explanation
"""
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1"])
"""
Explanation: This will create a MirroredStrategy instance, which will use all the GPUs that are visible to TensorFlow, and NCCL—as the cross-device communication.
If you wish to use only some of the GPUs on your machine, you can do so like this:
End of explanation
"""
mirrored_strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
"""
Explanation: If you wish to override the cross device communication, you can do so using the cross_device_ops argument by supplying an instance of tf.distribute.CrossDeviceOps. Currently, tf.distribute.HierarchicalCopyAllReduce and tf.distribute.ReductionToOneDevice are two options other than tf.distribute.NcclAllReduce, which is the default.
End of explanation
"""
strategy = tf.distribute.MultiWorkerMirroredStrategy()
"""
Explanation: TPUStrategy
tf.distribute.TPUStrategy lets you run your TensorFlow training on Tensor Processing Units (TPUs). TPUs are Google's specialized ASICs designed to dramatically accelerate machine learning workloads. They are available on Google Colab, the TPU Research Cloud, and Cloud TPU.
In terms of distributed training architecture, TPUStrategy is the same as MirroredStrategy: it implements synchronous distributed training. TPUs provide their own implementation of efficient all-reduce and other collective operations across multiple TPU cores, which are used in TPUStrategy.
Here is how you would instantiate TPUStrategy:
Note: To run any TPU code in Colab, you should select TPU as the Colab runtime. Refer to the Use TPUs guide for a complete example.
python
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=tpu_address)
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
tpu_strategy = tf.distribute.TPUStrategy(cluster_resolver)
The TPUClusterResolver instance helps locate the TPUs. In Colab, you don't need to specify any arguments to it.
If you want to use this for Cloud TPUs:
You must specify the name of your TPU resource in the tpu argument.
You must initialize the TPU system explicitly at the start of the program. This is required before TPUs can be used for computation. Initializing the TPU system also wipes out the TPU memory, so it's important to complete this step first in order to avoid losing state.
MultiWorkerMirroredStrategy
tf.distribute.MultiWorkerMirroredStrategy is very similar to MirroredStrategy. It implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to tf.distribute.MirroredStrategy, it creates copies of all variables in the model on each device across all workers.
Here is the simplest way of creating MultiWorkerMirroredStrategy:
End of explanation
"""
communication_options = tf.distribute.experimental.CommunicationOptions(
implementation=tf.distribute.experimental.CommunicationImplementation.NCCL)
strategy = tf.distribute.MultiWorkerMirroredStrategy(
communication_options=communication_options)
"""
Explanation: MultiWorkerMirroredStrategy has two implementations for cross-device communication. CommunicationImplementation.RING is RPC-based and supports both CPUs and GPUs. CommunicationImplementation.NCCL uses NCCL and provides state-of-the-art performance on GPUs, but it doesn't support CPUs. CommunicationImplementation.AUTO defers the choice to TensorFlow. You can specify them in the following way:
End of explanation
"""
central_storage_strategy = tf.distribute.experimental.CentralStorageStrategy()
"""
Explanation: One of the key differences to get multi worker training going, as compared to multi-GPU training, is the multi-worker setup. The 'TF_CONFIG' environment variable is the standard way in TensorFlow to specify the cluster configuration to each worker that is part of the cluster. Learn more in the setting up TF_CONFIG section of this document.
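For illustration only (the hostnames, ports, and task index below are placeholders), a minimal 'TF_CONFIG' for a two-worker cluster could be set like this:
python
import json, os
os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {"worker": ["worker0.example.com:12345", "worker1.example.com:23456"]},
    "task": {"type": "worker", "index": 0}  # each worker sets its own index
})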
For more details about MultiWorkerMirroredStrategy, consider the following tutorials:
Multi-worker training with Keras Model.fit
Multi-worker training with a custom training loop
ParameterServerStrategy
Parameter server training is a common data-parallel method to scale up model training on multiple machines. A parameter server training cluster consists of workers and parameter servers. Variables are created on parameter servers and they are read and updated by workers in each step. Check out the Parameter server training tutorial for details.
In TensorFlow 2, parameter server training uses a central coordinator-based architecture via the tf.distribute.experimental.coordinator.ClusterCoordinator class.
In this implementation, the worker and parameter server tasks run tf.distribute.Servers that listen for tasks from the coordinator. The coordinator creates resources, dispatches training tasks, writes checkpoints, and deals with task failures.
In the program running on the coordinator, you will use a ParameterServerStrategy object to define a training step and use a ClusterCoordinator to dispatch training steps to remote workers. Here is the simplest way to create them:
python
strategy = tf.distribute.experimental.ParameterServerStrategy(
tf.distribute.cluster_resolver.TFConfigClusterResolver(),
variable_partitioner=variable_partitioner)
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy)
To learn more about ParameterServerStrategy, check out the Parameter server training with Keras Model.fit and a custom training loop tutorial.
Note: You will need to configure the 'TF_CONFIG' environment variable if you use TFConfigClusterResolver. It is similar to 'TF_CONFIG' in MultiWorkerMirroredStrategy but has additional caveats.
In TensorFlow 1, ParameterServerStrategy is available only with an Estimator via tf.compat.v1.distribute.experimental.ParameterServerStrategy symbol.
Note: This strategy is experimental as it is currently under active development.
CentralStorageStrategy
tf.distribute.experimental.CentralStorageStrategy does synchronous training as well. Variables are not mirrored; instead, they are placed on the CPU and operations are replicated across all local GPUs. If there is only one GPU, all variables and operations will be placed on that GPU.
Create an instance of CentralStorageStrategy by:
End of explanation
"""
default_strategy = tf.distribute.get_strategy()
"""
Explanation: This will create a CentralStorageStrategy instance which will use all visible GPUs and the CPU. Updates to variables on replicas will be aggregated before being applied to the variables.
Note: This strategy is experimental, as it is currently a work in progress.
Other strategies
In addition to the above strategies, there are two other strategies which might be useful for prototyping and debugging when using tf.distribute APIs.
Default Strategy
The Default Strategy is a distribution strategy which is present when no explicit distribution strategy is in scope. It implements the tf.distribute.Strategy interface but is a pass-through and provides no actual distribution. For instance, Strategy.run(fn) will simply call fn. Code written using this strategy should behave exactly as code written without any strategy. You can think of it as a "no-op" strategy.
The Default Strategy is a singleton—and one cannot create more instances of it. It can be obtained using tf.distribute.get_strategy outside any explicit strategy's scope (the same API that can be used to get the current strategy inside an explicit strategy's scope).
End of explanation
"""
# In optimizer or other library code
# Get currently active strategy
strategy = tf.distribute.get_strategy()
strategy.reduce("SUM", 1., axis=None) # reduce some values
"""
Explanation: This strategy serves two main purposes:
It allows writing distribution-aware library code unconditionally. For example, in tf.optimizers you can use tf.distribute.get_strategy and use that strategy for reducing gradients—it will always return a strategy object on which you can call the Strategy.reduce API.
End of explanation
"""
if tf.config.list_physical_devices('GPU'):
strategy = tf.distribute.MirroredStrategy()
else: # Use the Default Strategy
strategy = tf.distribute.get_strategy()
with strategy.scope():
# Do something interesting
print(tf.Variable(1.))
"""
Explanation: Similar to library code, it can be used to write end users' programs to work with and without distribution strategy, without requiring conditional logic. Here's a sample code snippet illustrating this:
End of explanation
"""
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model.compile(loss='mse', optimizer='sgd')
"""
Explanation: OneDeviceStrategy
tf.distribute.OneDeviceStrategy is a strategy to place all variables and computation on a single specified device.
python
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
This strategy is distinct from the Default Strategy in a number of ways. In the Default Strategy, the variable placement logic remains unchanged when compared to running TensorFlow without any distribution strategy. But when using OneDeviceStrategy, all variables created in its scope are explicitly placed on the specified device. Moreover, any functions called via OneDeviceStrategy.run will also be placed on the specified device.
Input distributed through this strategy will be prefetched to the specified device. In the Default Strategy, there is no input distribution.
Similar to the Default Strategy, this strategy could also be used to test your code before switching to other strategies which actually distribute to multiple devices/machines. This will exercise the distribution strategy machinery somewhat more than the Default Strategy, but not to the full extent of using, for example, MirroredStrategy or TPUStrategy. If you want code that behaves as if there is no strategy, then use the Default Strategy.
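Here is a minimal sketch of that placement behavior, assuming a machine where a GPU is visible at "/gpu:0":
python
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
with strategy.scope():
    v = tf.Variable(1.)  # explicitly placed on /gpu:0

@tf.function
def step():
    return v * 2.

print(strategy.run(step))  # the function is also placed on /gpu:0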
So far you've learned about different strategies and how you can instantiate them. The next few sections show the different ways in which you can use them to distribute your training.
Use tf.distribute.Strategy with Keras Model.fit
tf.distribute.Strategy is integrated into tf.keras, which is TensorFlow's implementation of the Keras API specification. tf.keras is a high-level API to build and train models. By integrating into the tf.keras backend, it's seamless for you to distribute your training written in the Keras training framework using Model.fit.
Here's what you need to change in your code:
Create an instance of the appropriate tf.distribute.Strategy.
Move the creation of Keras model, optimizer and metrics inside strategy.scope. Thus the code in the model's call(), train_step(), and test_step() methods will all be distributed and executed on the accelerator(s).
TensorFlow distribution strategies support all types of Keras models—Sequential, Functional, and subclassed.
Here is a snippet of code to do this for a very simple Keras model with one Dense layer:
End of explanation
"""
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(10)
model.fit(dataset, epochs=2)
model.evaluate(dataset)
"""
Explanation: This example uses MirroredStrategy, so you can run this on a machine with multiple GPUs. strategy.scope() indicates to Keras which strategy to use to distribute the training. Creating models/optimizers/metrics inside this scope allows you to create distributed variables instead of regular variables. Once this is set up, you can fit your model like you would normally. MirroredStrategy takes care of replicating the model's training on the available GPUs, aggregating gradients, and more.
End of explanation
"""
import numpy as np
inputs, targets = np.ones((100, 1)), np.ones((100, 1))
model.fit(inputs, targets, epochs=2, batch_size=10)
"""
Explanation: Here a tf.data.Dataset provides the training and eval input. You can also use NumPy arrays:
End of explanation
"""
mirrored_strategy.num_replicas_in_sync
# Compute a global batch size using a number of replicas.
BATCH_SIZE_PER_REPLICA = 5
global_batch_size = (BATCH_SIZE_PER_REPLICA *
mirrored_strategy.num_replicas_in_sync)
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100)
dataset = dataset.batch(global_batch_size)
LEARNING_RATES_BY_BATCH_SIZE = {5: 0.1, 10: 0.15, 20:0.175}
learning_rate = LEARNING_RATES_BY_BATCH_SIZE[global_batch_size]
"""
Explanation: In both cases—with Dataset or NumPy—each batch of the given input is divided equally among the multiple replicas. For instance, if you are using the MirroredStrategy with 2 GPUs, each batch of size 10 will be divided among the 2 GPUs, with each receiving 5 input examples in each step. Each epoch will then train faster as you add more GPUs. Typically, you would want to increase your batch size as you add more accelerators, so as to make effective use of the extra computing power. You will also need to re-tune your learning rate, depending on the model. You can use strategy.num_replicas_in_sync to get the number of replicas.
End of explanation
"""
with mirrored_strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
optimizer = tf.keras.optimizers.SGD()
"""
Explanation: What's supported now?
| Training API | MirroredStrategy | TPUStrategy | MultiWorkerMirroredStrategy | ParameterServerStrategy | CentralStorageStrategy |
| ----------------- | ------------------ | ------------- | ----------------------------- | ------------------------- | ------------------------ |
| Keras Model.fit | Supported | Supported | Supported | Experimental support | Experimental support |
Examples and tutorials
Here is a list of tutorials and examples that illustrate the above integration end-to-end with Keras Model.fit:
Tutorial: Training with Model.fit and MirroredStrategy.
Tutorial: Training with Model.fit and MultiWorkerMirroredStrategy.
Guide: Contains an example of using Model.fit and TPUStrategy.
Tutorial: Parameter server training with Model.fit and ParameterServerStrategy.
Tutorial: Fine-tuning BERT for many tasks from the GLUE benchmark with Model.fit and TPUStrategy.
TensorFlow Model Garden repository containing collections of state-of-the-art models implemented using various strategies.
Use tf.distribute.Strategy with custom training loops
As demonstrated above, using tf.distribute.Strategy with Keras Model.fit requires changing only a couple lines of your code. With a little more effort, you can also use tf.distribute.Strategy with custom training loops.
If you need more flexibility and control over your training loops than is possible with Estimator or Keras, you can write custom training loops. For instance, when using a GAN, you may want to take a different number of generator or discriminator steps each round. Similarly, the high level frameworks are not very suitable for Reinforcement Learning training.
The tf.distribute.Strategy classes provide a core set of methods to support custom training loops. Using these may require minor restructuring of the code initially, but once that is done, you should be able to switch between GPUs, TPUs, and multiple machines simply by changing the strategy instance.
Below is a brief snippet illustrating this use case for a simple training example using the same Keras model as before.
First, create the model and optimizer inside the strategy's scope. This ensures that any variables created with the model and optimizer are mirrored variables.
End of explanation
"""
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch(
global_batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
"""
Explanation: Next, create the input dataset and call tf.distribute.Strategy.experimental_distribute_dataset to distribute the dataset based on the strategy.
End of explanation
"""
loss_object = tf.keras.losses.BinaryCrossentropy(
from_logits=True,
reduction=tf.keras.losses.Reduction.NONE)
def compute_loss(labels, predictions):
per_example_loss = loss_object(labels, predictions)
return tf.nn.compute_average_loss(per_example_loss, global_batch_size=global_batch_size)
def train_step(inputs):
features, labels = inputs
with tf.GradientTape() as tape:
predictions = model(features, training=True)
loss = compute_loss(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
@tf.function
def distributed_train_step(dist_inputs):
per_replica_losses = mirrored_strategy.run(train_step, args=(dist_inputs,))
return mirrored_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,
axis=None)
"""
Explanation: Then, define one step of the training. Use tf.GradientTape to compute gradients and optimizer to apply those gradients to update your model's variables. To distribute this training step, put it in a function train_step and pass it to tf.distribute.Strategy.run along with the dataset inputs you got from the dist_dataset created before:
End of explanation
"""
for dist_inputs in dist_dataset:
print(distributed_train_step(dist_inputs))
"""
Explanation: A few other things to note in the code above:
You used tf.nn.compute_average_loss to compute the loss. tf.nn.compute_average_loss sums the per example loss and divides the sum by the global_batch_size. This is important because later after the gradients are calculated on each replica, they are aggregated across the replicas by summing them.
You also used the tf.distribute.Strategy.reduce API to aggregate the results returned by tf.distribute.Strategy.run. tf.distribute.Strategy.run returns results from each local replica in the strategy, and there are multiple ways to consume this result. You can reduce them to get an aggregated value. You can also do tf.distribute.Strategy.experimental_local_results to get the list of values contained in the result, one per local replica.
When you call apply_gradients within a distribution strategy scope, its behavior is modified. Specifically, before applying gradients on each parallel instance during synchronous training, it performs a sum-over-all-replicas of the gradients.
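As a small sketch of the tf.distribute.Strategy.experimental_local_results option mentioned above (reusing the names from the training-loop code in this section):
python
per_replica_losses = mirrored_strategy.run(train_step, args=(dist_inputs,))
# A tuple with one loss tensor per local replica, instead of a single reduced value.
local_losses = mirrored_strategy.experimental_local_results(per_replica_losses)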
Finally, once you have defined the training step, you can iterate over dist_dataset and run the training in a loop:
End of explanation
"""
iterator = iter(dist_dataset)
for _ in range(10):
print(distributed_train_step(next(iterator)))
"""
Explanation: In the example above, you iterated over the dist_dataset to provide input to your training. You are also provided with the tf.distribute.Strategy.make_experimental_numpy_dataset to support NumPy inputs. You can use this API to create a dataset before calling tf.distribute.Strategy.experimental_distribute_dataset.
Another way of iterating over your data is to explicitly use iterators. You may want to do this when you want to run for a given number of steps as opposed to iterating over the entire dataset. The above iteration would now be modified to first create an iterator and then explicitly call next on it to get the input data.
End of explanation
"""
|
muneebalam/scrapenhl2
|
examples/Shot Rates After Faceoffs.ipynb
|
mit
|
team = team_info.team_as_id('WSH')
season = 2017
pbp = teams.get_team_pbp(season, team)
toi = teams.get_team_toi(season, team)
"""
Explanation: The purpose of this script is to generate shot counts for skaters after faceoffs.
For example, CA after 5 and 10 seconds for Nicklas Backstrom after defensive-zone faceoff wins.
End of explanation
"""
# Filter to 5v5
toi = manip.filter_for_five_on_five(toi) \
[['Game', 'Team1', 'Team2', 'Team3', 'Team4', 'Team5', 'Time']] \
.drop_duplicates() # sometimes these appear
toi = toi.melt(id_vars=['Time', 'Game'], value_name='PlayerID') \
.drop('variable', axis=1)
toi.head()
"""
Explanation: This is how we'll approach the problem:
Filter TOI to 5v5 and go wide to long on players
Isolate faceoffs from the PBP
Get faceoff zones
Filter for 5v5
Get faceoff zones
Join faceoffs to TOI and code how many seconds after a draw that sec is
Join faceoffs, have an indicator of last faceoff
Fill forward on last faceoff time
Calculate difference between time and last faceoff time; filter for time <= 15
Isolate shots from the PBP
Join to TOI
Filter for 5v5
Join to first dataframe on game, player, and time
Group by player, time since draw, draw zone and type, and which team took the shot, and sum
Repeat for each team (not done here)
5v5 TOI
End of explanation
"""
# Get faceoffs
draws = manip.filter_for_event_types(pbp, 'Faceoff')
# Select only needed columns
draws = draws[['Game', 'Team', 'Period', 'MinSec', 'X', 'Y']]
# Convert period and mm:ss to time elapsed in game
draws = onice.add_times_to_file(draws, periodcol='Period', timecol='MinSec', time_format='elapsed')
draws = draws.drop({'Period', 'MinSec'}, axis=1).assign(Season=season)
# Get zones
directions = manip.get_directions_for_xy_for_season(season, team)
draws = manip.infer_zones_for_faceoffs(draws, directions, 'X', 'Y', '_Secs', focus_team='WSH')
draws = draws.drop({'X', 'Y'}, axis=1)
# Simplify zone notation to just N, O, D
draws.loc[:, 'Zone'] = draws.EventLoc.str.slice(0, 1)
draws = draws.drop('EventLoc', axis=1)
# Combine with team column to change N, D, O, to NW, NL, DW, DL, OW, OL
draws.loc[:, 'WL'] = draws.Team.apply(lambda x: 'W' if x == team else 'L')
draws.loc[:, 'ZS'] = draws.Zone + draws.WL
draws = draws.drop({'WL', 'Zone', 'Team'}, axis=1)
draws.head()
# Join to TOI
draws_joined = draws.rename(columns={'_Secs': 'Time'}) \
.merge(toi, how='right', on=['Game', 'Time'])
# Add last faceoff indicator and fill forward
draws_joined = draws_joined.sort_values(['Game', 'PlayerID', 'Time'])
draws_joined.loc[pd.notnull(draws_joined.ZS), 'LastDraw'] = draws_joined.Time
draws_joined.loc[:, 'LastDraw'] = draws_joined[['Game', 'PlayerID', 'LastDraw']] \
.groupby(['Game', 'PlayerID']).ffill()
draws_joined.loc[:, 'ZS'] = draws_joined[['Game', 'PlayerID', 'ZS']] \
.groupby(['Game', 'PlayerID']).ffill()
draws_joined.loc[:, 'TimeSinceLastDraw'] = draws_joined.Time - draws_joined.LastDraw
draws_joined = draws_joined[pd.notnull(draws_joined.TimeSinceLastDraw)] \
.query("TimeSinceLastDraw > 0")
draws_joined.head()
"""
Explanation: Link times to time since last draw and faceoff zones
End of explanation
"""
# Get shot attempts
cfca = manip.filter_for_corsi(pbp)
# Select only needed columns
cfca = cfca[['Game', 'Team', 'Period', 'MinSec']]
# Convert period and mm:ss to time elapsed in game
cfca = onice.add_times_to_file(cfca, periodcol='Period', timecol='MinSec', time_format='elapsed')
cfca = cfca.drop({'Period', 'MinSec'}, axis=1).rename(columns={'_Secs': 'Time'})
# Add on-ice players
cfca = cfca.merge(toi, how='left', on=['Game', 'Time'])
# Change Team to CF or CA
cfca.loc[:, 'Team'] = cfca.Team.apply(lambda x: 'CF' if x == team else 'CA')
cfca.head()
# Join to faceoffs df
joined = draws_joined.merge(cfca, how='left', on=['Game', 'Time', 'PlayerID'])
# Get counts of time after each draw
time_counts = joined[['ZS', 'PlayerID', 'TimeSinceLastDraw']] \
.assign(TOI=1) \
.groupby(['ZS', 'PlayerID', 'TimeSinceLastDraw'], as_index=False) \
.count()
# Get counts of shots
shot_counts = joined[['ZS', 'PlayerID', 'TimeSinceLastDraw', 'Team']] \
.dropna() \
.assign(Count=1) \
.groupby(['ZS', 'PlayerID', 'TimeSinceLastDraw', 'Team'], as_index=False) \
.count() \
.pivot_table(index=['ZS', 'PlayerID', 'TimeSinceLastDraw'], columns='Team', values='Count') \
.reset_index()
shot_counts.loc[:, 'CA'] = shot_counts.CA.fillna(0)
shot_counts.loc[:, 'CF'] = shot_counts.CF.fillna(0)
alljoined = time_counts \
.merge(shot_counts, how='left', on=['ZS', 'PlayerID', 'TimeSinceLastDraw']) \
.fillna(0)
alljoined.head()
"""
Explanation: Shot counts
End of explanation
"""
alljoined.loc[:, 'Player'] = players.playerlst_as_str(alljoined.PlayerID)
alljoined = alljoined.drop('PlayerID', axis=1)
alljoined.loc[:, 'CF60'] = alljoined.CF * 3600 / alljoined.TOI
alljoined.loc[:, 'CA60'] = alljoined.CA * 3600 / alljoined.TOI
alljoined.to_csv('time_since_last_draw_data.csv', index=False)
alljoined.head()
comp_players = ('Nicklas Backstrom', 'Evgeny Kuznetsov', 'Lars Eller', 'Jay Beagle')
def plot_cumulative_shot_lines(df, zone, metric, ax, *comp_players):
for p in comp_players:
df1 = df.query('ZS == "{0:s}" & Player == "{1:s}"'.format(zone, p)).sort_values('TimeSinceLastDraw')
df1.loc[:, 'TOI'] = df1.TOI.cumsum()
df1.loc[:, metric] = df1[metric].cumsum()
df1.loc[:, '{0:s}60'.format(metric)] = df1[metric] * 3600 / df1.TOI
ax.plot(df1.TimeSinceLastDraw, df1['{0:s}60'.format(metric)], label=p)
ax.set_title('Cumulative {0:s}60 after {1:s}'.format(metric, zone))
def plot_comparison(df, metric, *comp_players):
fig, axes = subplots(2, 3, sharex=True, sharey=True, figsize=[12, 8])
axes = axes.flatten()
df2 = df.query('TimeSinceLastDraw <= 15')
for i, zone in enumerate(['OW', 'OL', 'NW', 'NL', 'DW', 'DL']):
if metric == 'N':
plot_cumulative_ns(df2, zone, axes[i], *comp_players)
else:
plot_cumulative_shot_lines(df2, zone, metric, axes[i], *comp_players)
legend(loc=1)
plot_comparison(alljoined, 'CF', *comp_players)
plot_comparison(alljoined, 'CA', *comp_players)
def plot_cumulative_ns(df, zone, ax, *comp_players):
for p in comp_players:
df1 = df.query('ZS == "{0:s}" & Player == "{1:s}"'.format(zone, p)).sort_values('TimeSinceLastDraw')
ax.plot(df1.TimeSinceLastDraw, df1.TOI, label=p)
ax.set_title('Post-faceoff time in {0:s}'.format(zone))
plot_comparison(alljoined, 'N', *comp_players)
"""
Explanation: Finally, we can replace Player IDs with names, export, and graph.
End of explanation
"""
# Find shift starts
shifts = toi.sort_values(['Game', 'PlayerID', 'Time'])
# If the time is not consecutive for the same game and player, this row starts a new shift
shifts.loc[:, 'PrevT'] = shifts.Time.shift(1)
shifts.loc[:, 'PrevP'] = shifts.PlayerID.shift(1)
shifts.loc[:, 'PrevG'] = shifts.Game.shift(1)
shifts.loc[(shifts.PlayerID == shifts.PrevP) & (shifts.Game == shifts.PrevG) & (shifts.Time != shifts.PrevT + 1),
'ShiftIndex'] = 1
shifts.loc[:, 'ShiftIndex'] = shifts.ShiftIndex.fillna(0)
shifts.loc[:, 'ShiftIndex'] = shifts.ShiftIndex.cumsum()
shifts = shifts.drop({'PrevT', 'PrevP', 'PrevG'}, axis=1)
shifts.head()
# Calculate amount of time at each point in shift
starttimes = shifts[['Time', 'ShiftIndex', 'PlayerID']] \
.groupby(['ShiftIndex', 'PlayerID'], as_index=False).min() \
.rename(columns={'Time': 'StartTime'})
shifts2 = shifts.merge(starttimes, how='left', on=['ShiftIndex', 'PlayerID'])
shifts2.loc[:, 'TimeSinceLastDraw'] = shifts2.Time - shifts2.StartTime + 1
counts = shifts2[['TimeSinceLastDraw', 'PlayerID']] \
.assign(TOI=1) \
.groupby(['TimeSinceLastDraw', 'PlayerID'], as_index=False) \
.count() \
.assign(ZS='Other')
counts.loc[:, 'Player'] = players.playerlst_as_str(counts.PlayerID)
counts = counts.drop('PlayerID', axis=1)
counts.head()
# Add to original
alljoined2 = pd.concat([alljoined[['Player', 'ZS', 'TimeSinceLastDraw', 'TOI']], counts])
alljoined2 = alljoined2.sort_values(['Player', 'ZS', 'TimeSinceLastDraw'])
# Convert to percentages
totals = alljoined2.drop('ZS', axis=1) \
.groupby(['Player', 'TimeSinceLastDraw'], as_index=False) \
.sum() \
.rename(columns={'TOI': 'TotalTOI'})
alljoined3 = alljoined2.merge(totals, how='left', on=['Player', 'TimeSinceLastDraw'])
alljoined3.loc[:, 'TOI'] = alljoined3.TOI / alljoined3.TotalTOI
alljoined3.head()
def plot_stacked_area(df, p, ax, limit=20):
zones = ['OW', 'OL', 'NW', 'NL', 'DW', 'DL'][::-1] # for DZ at bottom
# Set colors
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
colors = [mplc.to_rgba(color_cycle[0], alpha=0.5), color_cycle[0],
mplc.to_rgba(color_cycle[1], alpha=0.5), color_cycle[1],
mplc.to_rgba(color_cycle[2], alpha=0.5), color_cycle[2]]
struct = pd.DataFrame({'TimeSinceLastDraw': range(1, limit+1, 1)}).assign(Player=p)
struct = struct.merge(df[['Player', 'TimeSinceLastDraw', 'ZS', 'TOI']],
how='left', on=['Player', 'TimeSinceLastDraw'])
struct = struct.pivot_table(index=['Player', 'TimeSinceLastDraw'], columns='ZS', values='TOI').reset_index()
struct = struct.fillna(method='ffill')
ax.stackplot(struct.TimeSinceLastDraw, [struct[zone] for zone in zones], labels=zones, colors=colors)
ax.set_title('TOI into shift by 5v5 shift\nstart for {0:s}'.format(p))
ax.set_xlabel('Seconds into shift')
legend(loc=2)
def plot_tois(df, *comp_players):
fig, axes = subplots(1, 4, sharex=True, sharey=True, figsize=[12, 4])
axes = axes.flatten()
for i, p in enumerate(comp_players):
plot_stacked_area(df, p, axes[i])
legend(loc=2, bbox_to_anchor=(1, 1))
axes[0].set_ylabel('% of 5v5 shifts')
plot_tois(alljoined3, *comp_players)
"""
Explanation: It also might be interesting to take a look at what fraction of a player's 5v5 TOI is accounted for by the time after faceoffs.
End of explanation
"""
|
zhouqifanbdh/liupengyuan.github.io
|
chapter1/homework/localization/3-22/201611680049(3).ipynb
|
mit
|
name = input('请输入你的姓名')
print('你好',name)
print('请输入出生的月份与日期')
month = int(input('月份:'))
date = int(input('日期:'))
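# Pick the zodiac sign from the birth month, using the day-of-month cutoff within each month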
if month == 4:
if date < 20:
print(name, '你是白羊座')
else:
print(name,'你是非常有性格的金牛座')
if month == 5:
if date < 21:
print(name, '你是非常有性格的金牛座')
else:
print(name,'你是双子座')
if month == 6:
if date < 22:
print(name, '你是双子座')
else:
print(name,'你是巨蟹座')
if month == 7:
if date < 23:
print(name, '你是巨蟹座')
else:
print(name,'你是狮子座')
if month == 8:
if date < 23:
print(name, '你是狮子座')
else:
print(name,'你是处女座')
if month == 9:
if date < 24:
print(name, '你是处女座')
else:
print(name,'你是天秤座')
if month == 10:
if date < 24:
print(name, '你是天秤座')
else:
print(name,'你是天蝎座')
if month == 11:
if date < 23:
print(name, '你是天蝎座')
else:
print(name,'你是射手座')
if month == 12:
if date < 22:
print(name, '你是射手座')
else:
print(name,'你是摩羯座')
if month == 1:
if date < 20:
print(name, '你是摩羯座')
else:
print(name,'你是水瓶座')
if month == 2:
if date < 19:
print(name, '你是水瓶座')
else:
print(name,'你是双鱼座')
if month == 3:
if date < 22:
print(name, '你是双鱼座')
else:
print(name,'你是白羊座')
"""
Explanation: Exercise 1: Write a program that reads the user's name from the keyboard (for example, Mr. right), asks the user for their birth month and day, and determines the user's zodiac sign. If the user is a Taurus, for example, it should print: Mr. right, you are a Taurus with real personality!
End of explanation
"""
m = int(input('请输入一个整数,回车结束'))
n = int(input('请输入一个整数,不为零'))
intend = input('请输入计算意图,如 + * %')
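# 'intend' selects the operation: '+' sums m..n, '*' multiplies m..n, '%' prints m % n, anything else prints m // n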
if m<n:
min_number = m
else:
min_number = n
total = min_number
if intend == '+':
if m<n:
while m<n:
m = m + 1
total = total + m
print(total)
else:
while m > n:
n = n + 1
total = total + n
print(total)
elif intend == '*':
if m<n:
while m<n:
m = m + 1
total = total * m
print(total)
else:
while m > n:
n = n + 1
total = total * n
print(total)
elif intend == '%':
print(m % n)
else:
print(m // n)
"""
Explanation: Exercise 2: Write a program that reads two integers m and n (n not equal to 0) from the keyboard and asks the user what they want to do: if they want a sum, compute and print the sum from m to n; if a product, compute and print the product from m to n; if a remainder, compute and print the remainder of m divided by n; otherwise, compute and print the integer division of m by n.
End of explanation
"""
number = int(input('现在北京的PM2.5指数是多少?请输入整数'))
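# Map the PM2.5 reading to protective advice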
if number > 500:
print('应该打开空气净化器,戴防雾霾口罩')
elif 300 < number < 500:
print('尽量呆在室内不出门,出门佩戴防雾霾口罩')
elif 200 < number < 300:
print('尽量不要进行户外活动')
elif 100 < number < 200:
print('轻度污染,可进行户外活动,可不佩戴口罩')
else:
print('无须特别注意')
"""
Explanation: Exercise 3: Write a program that gives protective advice based on Beijing's PM2.5 smog reading. For example, when the PM2.5 value is greater than 500, you should turn on an air purifier, wear an anti-smog mask, and so on.
End of explanation
"""
print('空行是我')
print('空行是我')
print('空行是我')
print( )
print('我是空行')
"""
Explanation: Exploratory exercise: Write a program that can display a blank line on the screen.
End of explanation
"""
word = input('请输入一个单词,回车结束')
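# Apply simple English pluralization rules based on the word's ending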
if word.endswith('s') or word.endswith('sh') or word.endswith('ch') or word.endswith('x'):
print(word,'es',sep = '')
elif word.endswith('y'):
if word.endswith('ay') or word.endswith('ey') or word.endswith('iy') or word.endswith('oy') or word.endswith('uy'):
print(word,'s',sep = '')
else:
word = word[:-1]
print(word,'ies',sep = '')
elif word.endswith('f'):
word = word[:-1]
print(word,'ves',sep = '')
elif word.endswith('fe'):
word = word[:-2]
print(word,'ves',sep = '')
elif word.endswith('o'):
print('词尾加s或者es')
else:
print(word,'s',sep = '')
"""
Explanation: Exercise 4: Convert an English word from singular to plural. Given an English word (singular form) as input, output its plural form, or give a suggestion for how to form the plural.
End of explanation
"""
|
googledatalab/notebooks
|
tutorials/Stackdriver Monitoring/Time-shifted data.ipynb
|
apache-2.0
|
from datalab.stackdriver import monitoring as gcm
# set_datalab_project_id('my-project-id')
"""
Explanation: Time-shifted Data
In this tutorial, we show how to transform the time-series data in the following ways:
* split time-series with a lot of data points into multiple segments, and
* time shift the above segments so that they have the same timestamps.
The above transformations allow you to easily compare the data over the last hour/day/week over the previous intervals. This helps you understand if your system's current behavior continues to match the past behavior.
Note: This tutorial reads in metric data from the Monitoring API, or a Google Cloud Storage bucket:
* If the variable 'common_prefix' is set, the data is read from the Monitoring API.
* If the variable 'common_prefix' is not set, the data is loaded from a shared Cloud Storage bucket. See here to learn more about the Storage API.
Load the monitoring module and set the default project
If there is no default project set already, you must do so using 'set_datalab_project_id'.
End of explanation
"""
import collections
# Initialize the query for CPU utilization over the last week, and read in its metadata.
query_cpu = gcm.Query('compute.googleapis.com/instance/cpu/utilization', hours=7*24)
cpu_metadata = query_cpu.metadata()
# Count the occurrences of each prefix, and display the top 5.
instance_prefix_counts = collections.Counter(
timeseries.metric.labels['instance_name'].rsplit('-', 1)[0]
for timeseries in cpu_metadata)
instance_prefix_counts.most_common(5)
"""
Explanation: Find the most common instance name prefixes
The prefix from an instance name is calculated by splitting on the last '-' character. All instances with the same prefixes are grouped together to get the prefix counts.
End of explanation
"""
# Set this variable to read data from your own project.
common_prefix = None # 'my-instance-prefix'
if common_prefix is None:
print('No prefix specified. The data will be read from a Cloud Storage bucket.')
else:
print('You selected the prefix: "%s"' % (common_prefix,))
"""
Explanation: Select the instance name prefix to filter on
In this cell, you can select an instance name prefix to filter on. If you do not set this variable, then the data is read from a Cloud Storage bucket.
You can look at the most frequent prefix in the previous cell. It is recommended that you select a prefix with the following properties:
* the instances have the CPU Utilization metric data for at least the last 5 days
* the instances span multiple zones
End of explanation
"""
import StringIO
import pandas
import datalab.storage as storage
if common_prefix is None:
print('Reading in data from a Cloud Storage Bucket')
# Initialize the bucket name, and item key.
bucket_name = 'cloud-datalab-samples'
per_zone_data = 'stackdriver-monitoring/timeseries/per-zone-weekly-20161010.csv'
# Load the CSV from the bucket, and initialize the dataframe using it.
per_zone_data_item = storage.Bucket(bucket_name).item(per_zone_data)
per_zone_data_string = StringIO.StringIO(per_zone_data_item.read_from())
per_zone_cpu_data = pandas.DataFrame.from_csv(per_zone_data_string)
else:
print('Reading in data from the Monitoring API')
# Filter the query to instances with the specified prefix.
query_cpu = query_cpu.select_metrics(instance_name_prefix=common_prefix)
# Aggregate to hourly intervals per zone.
query_cpu = query_cpu.align(gcm.Aligner.ALIGN_MEAN, hours=1)
query_cpu = query_cpu.reduce(gcm.Reducer.REDUCE_MEAN, 'resource.zone')
# Get the time series data as a dataframe, with a single-level header.
per_zone_cpu_data = query_cpu.as_dataframe(label='zone')
per_zone_cpu_data.tail(5)
"""
Explanation: Load the time series data
Based on the value of 'common_prefix' in the previous cell, the time series data is loaded from the Monitoring API, or a shared Cloud Storage bucket.
In both cases, we load the time series of the CPU Utilization metric over the last week, aggregated to hourly intervals per zone.
End of explanation
"""
import collections
# Extract the number of days in the dataframe.
num_days = len(per_zone_cpu_data.index)/24
# Split the big dataframe into daily dataframes.
daily_dataframes = [per_zone_cpu_data.iloc[24*i: 24*(i+1)]
for i in xrange(num_days)]
# Reverse the list to have today's data in the first index.
daily_dataframes.reverse()
# Display the last five rows from today's data.
daily_dataframes[0].tail(5)
"""
Explanation: Split the data into daily chunks
Here, we split the data over daily boundaries.
End of explanation
"""
TODAY = 'Today'
# Helper function to make a readable day name based on offset from today.
def make_day_name(offset):
if offset == 0:
return TODAY
elif offset == 1:
return 'Yesterday'
return '%d days ago' % (offset,)
"""
Explanation: Initialize a helper function
Here, we initialize a helper function to create human readable names for days.
End of explanation
"""
# Extract the zone names.
all_zones = per_zone_cpu_data.columns.tolist()
# Use the last day's timestamps as the index, and initialize a dataframe per zone.
last_day_index = daily_dataframes[0].index
zone_to_shifted_df = {zone: pandas.DataFrame([], index=last_day_index)
for zone in all_zones}
for i, dataframe in enumerate(daily_dataframes):
# Shift the dataframe to line up with the start of the last day.
dataframe = dataframe.tshift(freq=last_day_index[0] - dataframe.index[0])
current_day_name = make_day_name(i)
# Insert each daily dataframe as a column into the dataframe.
for zone in all_zones:
zone_to_shifted_df[zone][current_day_name] = dataframe[zone]
# Display the first five rows from the first zone.
zone_to_shifted_df[all_zones[0]].head(5)
"""
Explanation: Time-shift all dataframes to line up with the last day
The pandas method tshift lets you shift a dataframe by a specified offset. We use this to shift the index of all days to match the timestamps in the latest day.
The data for each zone is inserted into a different dataframe, where the rows are timestamps and the columns are specific days.
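As a small standalone illustration (the values below are made up), tshift moves only the index and leaves the data unchanged:
python
demo_index = pandas.date_range('2016-10-10', periods=3, freq='H')
demo_df = pandas.DataFrame({'x': [1, 2, 3]}, index=demo_index)
demo_df.tshift(freq=pandas.Timedelta(days=1))  # same values, timestamps one day later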
End of explanation
"""
for zone, dataframe in zone_to_shifted_df.iteritems():
dataframe.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))
"""
Explanation: Compare the CPU utilization day-over-day
End of explanation
"""
for zone, dataframe in zone_to_shifted_df.iteritems():
# Initialize the dataframe by extracting the column with data for today.
compare_to_avg_df = dataframe.loc[:, [TODAY]]
# Add a column with the weekly avg.
compare_to_avg_df['Weekly avg.'] = dataframe.mean(axis=1)
# Plot this dataframe.
compare_to_avg_df.plot(title=zone).legend(loc="upper left", bbox_to_anchor=(1,1))
"""
Explanation: Compare today's CPU Utilization to the weekly average
In order to compare today's metric data with the weekly average, we create new dataframes with the following columns:
* Today's data: From the original data for TODAY
* Average over the week: From the mean across all the days
End of explanation
"""
|
nproctor/phys202-2015-work
|
assignments/assignment05/InteractEx01.ipynb
|
mit
|
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.html import widgets
from IPython.display import display
"""
Explanation: Interact Exercise 01
Import
End of explanation
"""
def print_sum(a, b):
"""Print the sum of the arguments a and b."""
print(a+b)
"""
Explanation: Interact basics
Write a print_sum function that prints the sum of its arguments a and b.
End of explanation
"""
interact(print_sum, a=(-10.,10.,.1),
b=widgets.IntSlider(min=-8,max=8,step=2));
assert True # leave this for grading the print_sum exercise
"""
Explanation: Use the interact function to interact with the print_sum function.
a should be a floating point slider over the interval [-10., 10.] with step sizes of 0.1
b should be an integer slider over the interval [-8, 8] with step sizes of 2.
End of explanation
"""
def print_string(s, length=False):
"""Print the string s and optionally its length."""
print(s)
if length==True:
print(len(s))
"""
Explanation: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
End of explanation
"""
interact(print_string, s="Hello World!", length=True);
assert True # leave this for grading the print_string exercise
"""
Explanation: Use the interact function to interact with the print_string function.
s should be a textbox with the initial value "Hello World!".
length should be a checkbox with an initial value of True.
End of explanation
"""
|
zaqwes8811/micro-apps
|
self_driving/deps/Kalman_and_Bayesian_Filters_in_Python_master/Appendix-G-Designing-Nonlinear-Kalman-Filters.ipynb
|
mit
|
from __future__ import division, print_function
%matplotlib inline
#format the book
import book_format
book_format.set_style()
"""
Explanation: Table of Contents
Designing Nonlinear Kalman Filters
End of explanation
"""
import matplotlib.pyplot as plt
circle1=plt.Circle((-4, 0), 5, color='#004080',
fill=False, linewidth=20, alpha=.7)
circle2=plt.Circle((4, 0), 5, color='#E24A33',
fill=False, linewidth=5, alpha=.7)
fig = plt.gcf()
ax = fig.gca()
plt.axis('equal')
plt.xlim((-10, 10))
plt.ylim((-10, 10))
plt.plot ([-4, 0], [0, 3], c='#004080')
plt.plot ([4, 0], [0, 3], c='#E24A33')
plt.text(-4, -.5, "A", fontsize=16, horizontalalignment='center')
plt.text(4, -.5, "B", fontsize=16, horizontalalignment='center')
ax.add_artist(circle1)
ax.add_artist(circle2)
plt.show()
"""
Explanation: Introduction
Author's note: I was initially planning to have a chapter on designing nonlinear Kalman filters that compares various approaches. This may or may not happen, but for now this chapter has no useful content and I suggest not reading it.
We see that the Kalman filter reasonably tracks the ball. However, as already explained, this is a silly example; we can predict trajectories in a vacuum with arbitrary precision; using a Kalman filter in this example is a needless complication.
Kalman Filter with Air Drag
I will dispense with the step 1, step 2 type of approach and proceed in a more natural style that you would use in a non-toy engineering problem. We have already developed a Kalman filter that does an excellent job of tracking a ball in a vacuum, but it does not incorporate the effects of air drag into the model. We know that the process model is implemented with $\textbf{F}$, so we will turn our attention to that immediately.
Notionally, the computation that $\textbf{F}$ computes is
$$x' = Fx$$
With no air drag, we had
$$
\mathbf{F} = \begin{bmatrix}
1 & \Delta t & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & \Delta t & \frac{1}{2}{\Delta t}^2 \\
0 & 0 & 0 & 1 & \Delta t \\
0 & 0 & 0 & 0 & 1
\end{bmatrix}
$$
which corresponds to the equations
$$
\begin{aligned}
x &= x + v_x \Delta t \\
v_x &= v_x \\
\\
y &= y + v_y \Delta t + \frac{a_y}{2} {\Delta t}^2 \\
v_y &= v_y + a_y \Delta t \\
a_y &= a_y
\end{aligned}
$$
From the section above we know that our new Euler equations must be
$$
\begin{aligned}
x &= x + v_x \Delta t \\
v_x &= v_x \\
\\
y &= y + v_y \Delta t + \frac{a_y}{2} {\Delta t}^2 \\
v_y &= v_y + a_y \Delta t \\
a_y &= a_y
\end{aligned}
$$
Realistic 2D Position Sensors
The position sensors in the last example are not very realistic. In general there is no 'raw' sensor that provides (x,y) coordinates. We have GPS, but GPS already uses a Kalman filter to create a filtered output; we should not be able to improve the signal by passing it through another Kalman filter unless we incorporate additional sensors to provide additional information. We will tackle that problem later.
Consider the following setup. In an open field we put two transmitters at known locations, each transmitting a signal that we can detect. We process the signal and determine how far we are from that signal, with some noise. First, let's look at a visual depiction of that.
End of explanation
"""
import sympy
from sympy import init_printing
init_printing(use_latex='png')
phi, x = sympy.symbols('\phi, x')
phi
"""
Explanation: Here I have attempted to show transmitter A, drawn in blue, at (-4,0) and a second one B, drawn in red, at (4,0). The blue and red circles show the range from the transmitters to the robot, with the line width illustrating the effect of the $1\sigma$ range error for each transmitter. Here I have given the blue transmitter more error than the red one. The most probable position for the robot is where the two circles intersect, which I have depicted with the blue and red lines. You will object that we have two intersections, not one, but we will see how we deal with that when we design the measurement function.
This is a very common sensor set up. Aircraft still use this system to navigate, where it is called DME (Distance Measuring Equipment). Today GPS is a much more common navigation system, but I have worked on an aircraft where we integrated sensors like this into our filter along with the GPS, INS, altimeters, etc. We will tackle what is called multi-sensor fusion later; for now we will just address this simple configuration.
The first step is to design our state variables. We will assume that the robot is traveling in a straight direction with constant velocity. This is unlikely to be true for a long period of time, but is acceptable for short periods of time. This does not differ from the previous problem - we will want to track the values for the robot's position and velocity. Hence,
$$\mathbf{x} =
\begin{bmatrix}x\\v_x\\y\\v_y\end{bmatrix}$$
The next step is to design the state transition function. This also will be the same as the previous problem, so without further ado,
$$
\mathbf{x}' = \begin{bmatrix}1& \Delta t& 0& 0\\0& 1& 0& 0\\0& 0& 1& \Delta t\\ 0& 0& 0& 1\end{bmatrix}\mathbf{x}$$
The next step is to design the control inputs. We have none, so we set ${\mathbf{B}}=0$.
The next step is to design the measurement function $\mathbf{z} = \mathbf{Hx}$. We can model the measurement using the Pythagorean theorem.
$$
z_a = \sqrt{(x-x_A)^2 + (y-y_A)^2} + v_a\\[1em]
z_b = \sqrt{(x-x_B)^2 + (y-y_B)^2} + v_b
$$
where $v_a$ and $v_b$ are white noise.
We see an immediate problem. The Kalman filter is designed for linear equations, and this is obviously nonlinear. In the next chapters we will look at several ways to handle nonlinear problems in a robust way, but for now we will do something simpler. If we know the approximate position of the robot then we can linearize these equations around that point. I could develop the generalized mathematics for this technique now, but instead let me just present the worked example to give context to that development.
Instead of computing $\mathbf{H}$ we will compute the partial derivative of $\mathbf{H}$ with respect to the robot's position $\mathbf{x}$. You are probably familiar with the concept of partial derivative, but if not, it just means how $\mathbf{H}$ changes with respect to the robot's position. It is computed as the partial derivative of $\mathbf{H}$ as follows:
$$\frac{\partial \mathbf{h}}{\partial \mathbf{x}} =
\begin{bmatrix}
\frac{\partial h_1}{\partial x_1} & \frac{\partial h_1}{\partial x_2} &\dots \\
\frac{\partial h_2}{\partial x_1} & \frac{\partial h_2}{\partial x_2} &\dots \\
\vdots & \vdots
\end{bmatrix}
$$
Let's work the first partial derivative. We want to find
$$\frac{\partial }{\partial x} \sqrt{(x-x_A)^2 + (y-y_A)^2}
$$
Which we compute as
$$
\begin{aligned}
\frac{\partial h_1}{\partial x} &= \frac{\partial}{\partial x}\left((x-x_A)^2 + (y-y_A)^2\right)^{\frac{1}{2}} \\
&= \frac{1}{2}\left((x-x_A)^2 + (y-y_A)^2\right)^{-\frac{1}{2}} \times 2(x-x_A) \\
&= \frac{x_r - x_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}}
\end{aligned}
$$
We continue this computation for the partial derivatives of the two distance equations with respect to $x$, $y$, $dx$ and $dy$, yielding
$$\frac{\partial\mathbf{h}}{\partial\mathbf{x}}=
\begin{bmatrix}
\frac{x_r - x_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}} & 0 &
\frac{y_r - y_A}{\sqrt{(x_r-x_A)^2 + (y_r-y_A)^2}} & 0 \\
\frac{x_r - x_B}{\sqrt{(x_r-x_B)^2 + (y_r-y_B)^2}} & 0 &
\frac{y_r - y_B}{\sqrt{(x_r-x_B)^2 + (y_r-y_B)^2}} & 0 \\
\end{bmatrix}
$$
That is pretty painful, and these are very simple equations. Computing the Jacobian can be extremely difficult or even impossible for more complicated systems. However, there is an easy way to get Python to do the work for you by using the SymPy module [1]. SymPy is a Python library for symbolic mathematics. The full scope of its abilities is beyond this book, but it can perform algebra, integrate and differentiate equations, find solutions to differential equations, and much more. We will use it to compute our Jacobian!
First, a simple example. We will import SymPy, initialize its pretty print functionality (which will render equations using LaTeX), and then declare a symbol for SymPy to use.
End of explanation
"""
sympy.diff('sqrt(phi)')
"""
Explanation: Notice how we use a LaTeX expression for the symbol phi. This is not necessary, but if you do, the symbol will render as LaTeX when output. Now let's do some math. What is the derivative of $\sqrt{\phi}$?
End of explanation
"""
sympy.factor('phi**3 -phi**2 + phi - 1')
"""
Explanation: We can factor equations.
End of explanation
"""
from sympy import symbols, Matrix
phi = symbols('\phi')
phi
x, y, xa, xb, ya, yb, dx, dy = symbols('x y x_a x_b y_a y_b dx dy')
H = Matrix([[sympy.sqrt((x-xa)**2 + (y-ya)**2)],
[sympy.sqrt((x-xb)**2 + (y-yb)**2)]])
state = Matrix([x, dx, y, dy])
H.jacobian(state)
"""
Explanation: SymPy has a remarkable list of features, and as much as I enjoy exercising its features we cannot cover them all here. Instead, let's compute our Jacobian.
End of explanation
"""
from math import sin, cos, atan2
def H_of(pos, pos_A, pos_B):
""" Given the position of our object at 'pos' in 2D, and two
transmitters A and B at positions 'pos_A' and 'pos_B', return
the partial derivative of H
"""
    theta_a = atan2(pos_A[1] - pos[1], pos_A[0] - pos[0])
    theta_b = atan2(pos_B[1] - pos[1], pos_B[0] - pos[0])
return np.array([[0, -cos(theta_a), 0, -sin(theta_a)],
[0, -cos(theta_b), 0, -sin(theta_b)]])
"""
Explanation: In a nutshell, the entry (0,0) contains the difference between the x coordinate of the robot and transmitter A's x coordinate divided by the distance between the robot and A. Entry (0,2) contains the same, except for the y coordinates of the robot and transmitter A. The bottom row contains the same computations, except for transmitter B. The 0 entries account for the velocity components of the state variables; naturally the range does not provide us with velocity.
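As a quick sanity check (the coordinates below are illustrative), you can evaluate the SymPy Jacobian at a sample point and confirm that each nonzero entry is just x/dist or y/dist:
python
J = H.jacobian(state)
# Robot at (1, 2), transmitter A at (-4, 0), transmitter B at (4, 0).
J.subs({x: 1, y: 2, xa: -4, ya: 0, xb: 4, yb: 0}).evalf(3)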
The values in this matrix change as the robot's position changes, so this is no longer a constant; we will have to recompute it for every time step of the filter.
If you look at this you may realize that this is just a computation of x/dist and y/dist, so we can switch this to a trigonometric form with no loss of generality:
$$\frac{\partial\mathbf{h}}{\partial\mathbf{x}}=
\begin{bmatrix}
-\cos{\theta_A} & 0 & -\sin{\theta_A} & 0 \\
-\cos{\theta_B} & 0 & -\sin{\theta_B} & 0
\end{bmatrix}
$$
However, this raises a huge problem. We are no longer computing $\mathbf{H}$, but $\Delta\mathbf{H}$, the change of $\mathbf{H}$. If we passed this into our Kalman filter without altering the rest of the design the output would be nonsense. Recall, for example, that we multiply $\mathbf{Hx}$ to generate the measurements that would result from the given estimate of $\mathbf{x}$. But now that $\mathbf{H}$ is linearized around our position it contains the change in the measurement function.
We are forced, therefore, to use the change in $\mathbf{x}$ for our state variables. So we have to go back and redesign our state variables.
Please note this is a completely normal occurrence in designing Kalman filters. The textbooks present examples like this as a fait accompli, as if it is trivially obvious that the state variables needed to be velocities, not positions. Perhaps once you do enough of these problems it would be trivially obvious, but at that point why are you reading a textbook? I find myself reading through a presentation multiple times, trying to figure out why they made a choice, finally to realize that it is because of the consequences of something on the next page. My presentation is longer, but it reflects what actually happens when you design a filter. You make what seem to be reasonable design choices, and as you move forward you discover properties that require you to recast your earlier steps. As a result, I am going to somewhat abandon my step 1, step 2, etc., approach, since so many real problems are not quite that straightforward.
If our state variables contain the velocities of the robot and not the position then how do we track where the robot is? We can't. Kalman filters that are linearized in this fashion use what is called a nominal trajectory - i.e. you assume a position and track direction, and then apply the changes in velocity and acceleration to compute the changes in that trajectory. How could it be otherwise? Recall the graphic showing the intersection of the two range circles - there are two areas of intersection. Think of what this would look like if the two transmitters were very close to each other - the intersections would be two very long crescent shapes. This Kalman filter, as designed, has no way of knowing your true position from only distance measurements to the transmitters. Perhaps your mind is already leaping to ways of working around this problem. If so, stay engaged, as later sections and chapters will provide you with these techniques. Presenting the full solution all at once leads to more confusion than insight, in my opinion.
So let's redesign our state transition function. We are assuming constant velocity and no acceleration, giving state equations of
$$
\dot{x}' = \dot{x} \\
\ddot{x}' = 0 \\
\dot{y}' = \dot{y} \\
\ddot{y}' = 0$$
This gives us the state transition function of
$$
\mathbf{F} = \begin{bmatrix}0 &1 & 0& 0\\0& 0& 0& 0\\0& 0& 0& 1\\ 0& 0& 0& 0\end{bmatrix}$$
A final complication comes from the measurements that we pass in. $\mathbf{Hx}$ is now computing the change in the measurement from our nominal position, so the measurement that we pass in needs to be not the range to A and B, but the change in range from our measured range to our nominal position.
There is a lot here to take in, so let's work through the code bit by bit. First we will define a function to compute $\frac{\partial\mathbf{h}}{\partial\mathbf{x}}$ for each time step.
End of explanation
"""
from numpy.random import randn
class DMESensor(object):
def __init__(self, pos_a, pos_b, noise_factor=1.0):
self.A = pos_a
self.B = pos_b
self.noise_factor = noise_factor
def range_of(self, pos):
""" returns tuple containing noisy range data to A and B
given a position 'pos'
"""
ra = math.sqrt((self.A[0] - pos[0])**2 + (self.A[1] - pos[1])**2)
rb = math.sqrt((self.B[0] - pos[0])**2 + (self.B[1] - pos[1])**2)
return (ra + randn()*self.noise_factor,
rb + randn()*self.noise_factor)
"""
Explanation: Now we need to create our simulated sensor.
End of explanation
"""
import kf_book.book_plots as bp
from filterpy.kalman import KalmanFilter
import math
import numpy as np
pos_a = (100, -20)
pos_b = (-100, -20)
f1 = KalmanFilter(dim_x=4, dim_z=2)
f1.F = np.array ([[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]], dtype=float)
f1.R *= 1.
f1.Q *= .1
f1.x = np.array([[1, 0, 1, 0]], dtype=float).T
f1.P = np.eye(4) * 5.
# initialize storage and other variables for the run
count = 30
xs, ys = [], []
pxs, pys = [], []
# create the simulated sensor
d = DMESensor(pos_a, pos_b, noise_factor=3.)
# pos will contain our nominal position since the filter does not
# maintain position.
pos = [0, 0]
for i in range(count):
# move (1,1) each step, so just use i
pos = [i, i]
# compute the difference in range between the nominal track
# and measured ranges
ra,rb = d.range_of(pos)
rx,ry = d.range_of((pos[0] + f1.x[0, 0], pos[1] + f1.x[2, 0]))
z = np.array([[ra - rx], [rb - ry]])
# compute linearized H for this time step
f1.H = H_of (pos, pos_a, pos_b)
# store stuff so we can plot it later
xs.append(f1.x[0, 0]+i)
ys.append(f1.x[2, 0]+i)
pxs.append(pos[0])
pys.append(pos[1])
# perform the Kalman filter steps
f1.predict()
f1.update(z)
bp.plot_filter(xs, ys)
bp.plot_track(pxs, pys)
plt.legend(loc=2)
plt.show()
"""
Explanation: Finally, we are ready for the Kalman filter code. I will position the transmitters at x=-100 and 100, both with y=-20. This gives me enough space to get good triangulation from both as the robot moves. I will start the robot at (0,0) and move by (1,1) each time step.
End of explanation
"""
import kf_book.ekf_internal as ekf
ekf.plot_ball()
"""
Explanation: Linearizing the Kalman Filter
Now that we have seen an example of linearizing the Kalman filter we are in a position to better understand the math.
We start by assuming some function $\mathbf f$
Example: A falling Ball
author's note: ignore this section for now.
In the Designing Kalman Filters chapter I first considered tracking a ball in a vacuum, and then in the atmosphere. The Kalman filter performed very well in a vacuum, but diverged from the ball's path in the atmosphere. Let us look at the output; to avoid littering this chapter with code from that chapter I have placed it all in the file `ekf_internal.py`.
End of explanation
"""
from sympy.abc import *
from sympy import *
init_printing(pretty_print=True, use_latex='mathjax')
x1 = (0.0034*g*exp(-x/22000)*((x)**2))/(2*b) - g
x2 = (a*g*exp(-x/c)*(Derivative(x)**2))/(2*b) - g
#pprint(x1)
#pprint(Derivative(x)*Derivative(x,n=2))
#pprint(diff(x2, x))
"""
Explanation: We can artificially force the Kalman filter to track the ball by making $Q$ large. That would cause the filter to mistrust its prediction, and scale the Kalman gain $K$ to strongly favor the measurements. However, this is not a valid approach. If the Kalman filter is correctly predicting the process we should not 'lie' to the filter by telling it there are process errors that do not exist. We may get away with that for some problems, in some conditions, but in general the Kalman filter's performance will be substandard.
Recall from the Designing Kalman Filters chapter that the acceleration is
$$a_x = (0.0039 + \frac{0.0058}{1+\exp{[(v-35)/5]}})vv_x \\
a_y = (0.0039 + \frac{0.0058}{1+\exp{[(v-35)/5]}})vv_y - g
$$
These equations will be very unpleasant to work with while we develop this subject, so for now I will retreat to a simpler one dimensional problem using this simplified equation for acceleration that does not take the nonlinearity of the drag coefficient into account:
$$\begin{aligned}
\ddot{y} &= \frac{0.0034ge^{-y/20000}\dot{y}^2}{2\beta} - g \\
\ddot{x} &= \frac{0.0034ge^{-x/20000}\dot{x}^2}{2\beta}
\end{aligned}$$
Here $\beta$ is the ballistic coefficient, where a high number indicates a low drag.
This is still nonlinear, so we need to linearize this equation at the current state point. If our state is position and velocity, we need an equation for some arbitrarily small change in $\mathbf{x}$, like so:
$$ \begin{bmatrix}\Delta \dot{x} \\ \Delta \ddot{x} \\ \Delta \dot{y} \\ \Delta \ddot{y}\end{bmatrix} =
\large\begin{bmatrix}
\frac{\partial \dot{x}}{\partial x} &
\frac{\partial \dot{x}}{\partial \dot{x}} &
\frac{\partial \dot{x}}{\partial y} &
\frac{\partial \dot{x}}{\partial \dot{y}} \\
\frac{\partial \ddot{x}}{\partial x} &
\frac{\partial \ddot{x}}{\partial \dot{x}} &
\frac{\partial \ddot{x}}{\partial y} &
\frac{\partial \ddot{x}}{\partial \dot{y}} \\
\frac{\partial \dot{y}}{\partial x} &
\frac{\partial \dot{y}}{\partial \dot{x}} &
\frac{\partial \dot{y}}{\partial y} &
\frac{\partial \dot{y}}{\partial \dot{y}} \\
\frac{\partial \ddot{y}}{\partial x} &
\frac{\partial \ddot{y}}{\partial \dot{x}} &
\frac{\partial \ddot{y}}{\partial y} &
\frac{\partial \ddot{y}}{\partial \dot{y}}
\end{bmatrix}\normalsize
\begin{bmatrix}\Delta x \\ \Delta \dot{x} \\ \Delta y \\ \Delta \dot{y}\end{bmatrix}$$
The equations do not contain both an x and a y, so any partial derivative with both in it must be equal to zero. We also know that $\large\frac{\partial \dot{x}}{\partial x}\normalsize = 0$ and that $\large\frac{\partial \dot{x}}{\partial \dot{x}}\normalsize = 1$, so our matrix ends up being
$$\mathbf{F} = \begin{bmatrix}0&1&0&0 \\
\frac{0.0034e^{-x/22000}\dot{x}^2g}{44000\beta}&0&0&0
\end{bmatrix}$$
$$\begin{aligned}\ddot{x} &= -\frac{1}{2}C_d\rho A \dot{x}\\
\ddot{y} &= -\frac{1}{2}C_d\rho A \dot{y}-g\end{aligned}$$
End of explanation
"""
| taylort7147/udacity-projects | boston_housing/boston_housing.ipynb | mit |
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.cross_validation import ShuffleSplit
# Pretty display for notebooks
%matplotlib inline
# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)
# Success
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)
"""
Explanation: Machine Learning Engineer Nanodegree
Model Evaluation & Validation
Project 1: Predicting Boston Housing Prices
Welcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can typically be edited by double-clicking the cell to enter edit mode.
Getting Started
In this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a good fit could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.
The dataset for this project originates from the UCI Machine Learning Repository. The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:
- 16 data points have an 'MEDV' value of 50.0. These data points likely contain missing or censored values and have been removed.
- 1 data point has an 'RM' value of 8.78. This data point can be considered an outlier and has been removed.
- The features 'RM', 'LSTAT', 'PTRATIO', and 'MEDV' are essential. The remaining non-relevant features have been excluded.
- The feature 'MEDV' has been multiplicatively scaled to account for 35 years of market inflation.
Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
End of explanation
"""
# TODO: Minimum price of the data
minimum_price = np.min(prices)
# TODO: Maximum price of the data
maximum_price = np.max(prices)
# TODO: Mean price of the data
mean_price = np.mean(prices)
# TODO: Median price of the data
median_price = np.median(prices)
# TODO: Standard deviation of prices of the data
std_price = np.std(prices)
# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
"""
Explanation: Data Exploration
In this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.
Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into features and the target variable. The features, 'RM', 'LSTAT', and 'PTRATIO', give us quantitative information about each data point. The target variable, 'MEDV', will be the variable we seek to predict. These are stored in features and prices, respectively.
Implementation: Calculate Statistics
For your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since numpy has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.
In the code cell below, you will need to implement the following:
- Calculate the minimum, maximum, mean, median, and standard deviation of 'MEDV', which is stored in prices.
- Store each calculation in their respective variable.
End of explanation
"""
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
""" Calculates and returns the performance score between
true and predicted values based on the metric chosen. """
# TODO: Calculate the performance score between 'y_true' and 'y_predict'
score = r2_score(y_true, y_predict)
# Return the score
return score
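# A hedged cross-check (illustrative only), using the sample values from Question 2
# below: R^2 can be computed by hand as 1 - SS_res / SS_tot and should agree with
# sklearn's r2_score.
_true = np.array([3.0, -0.5, 2.0, 7.0, 4.2])
_pred = np.array([2.5, 0.0, 2.1, 7.8, 5.3])
_ss_res = np.sum((_true - _pred)**2)
_ss_tot = np.sum((_true - _true.mean())**2)
print "Manual R^2: {:.3f} | r2_score: {:.3f}".format(1 - _ss_res/_ss_tot,
                                                     performance_metric(_true, _pred))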
"""
Explanation: Question 1 - Feature Observation
As a reminder, we are using three features from the Boston housing dataset: 'RM', 'LSTAT', and 'PTRATIO'. For each data point (neighborhood):
- 'RM' is the average number of rooms among homes in the neighborhood.
- 'LSTAT' is the percentage of homeowners in the neighborhood considered "lower class" (working poor).
- 'PTRATIO' is the ratio of students to teachers in primary and secondary schools in the neighborhood.
Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an increase in the value of 'MEDV' or a decrease in the value of 'MEDV'? Justify your answer for each.
Hint: Would you expect a home that has an 'RM' value of 6 be worth more or less than a home that has an 'RM' value of 7?
Answer:
'RM': An increase in 'RM' would lead to an increase in the value of 'MEDV', since houses with more rooms are generally larger and require more land and material.
'LSTAT': An increase in 'LSTAT' would lead to a decrease in the value of 'MEDV', because housing in an area considered more "lower class" would tend to be affordable to members of the lower class, meaning prices would tend downward.
'PTRATIO': I would expect an increase in 'PTRATIO' would lead to a decrease in 'MEDV'. Think inversely, an increase in student-to-teacher ratio means a decrease in teacher-to-student ratio. One might consider an area with a lower teacher-to-student ratio to have lower academic standards. Areas with lower academic standards are less attractive to homebuyers who might be looking to start a family.
Developing a Model
In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.
Implementation: Define a Performance Metric
It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the coefficient of determination, R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the target variable. A model with an R<sup>2</sup> of 0 always fails to predict the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the features. A model can be given a negative R<sup>2</sup> as well, which indicates that the model performs worse than one that naively predicts the mean of the target variable.
For the performance_metric function in the code cell below, you will need to implement the following:
- Use r2_score from sklearn.metrics to perform a performance calculation between y_true and y_predict.
- Assign the performance score to the score variable.
End of explanation
"""
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
"""
Explanation: Question 2 - Goodness of Fit
Assume that a dataset contains five data points and a model made the following predictions for the target variable:
| True Value | Prediction |
| :-------------: | :--------: |
| 3.0 | 2.5 |
| -0.5 | 0.0 |
| 2.0 | 2.1 |
| 7.0 | 7.8 |
| 4.2 | 5.3 |
Would you consider this model to have successfully captured the variation of the target variable? Why or why not?
Run the code cell below to use the performance_metric function and calculate this model's coefficient of determination.
End of explanation
"""
# TODO: Import 'train_test_split'
from sklearn.cross_validation import train_test_split
# TODO: Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split(features, prices, random_state=14, train_size=0.8)
# Success
print "Training and testing split was successful."
"""
Explanation: Answer:
It would depend on what the data represents, and how you measure success. The coefficient of determination is 0.923, meaning that only 7.7% of the variance is unexplained by the model. In most cases, I would think this would meet success criteria.
Implementation: Shuffle and Split Data
Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.
For the code cell below, you will need to implement the following:
- Use train_test_split from sklearn.cross_validation to shuffle and split the features and prices data into training and testing sets.
- Split the data into 80% training and 20% testing.
- Set the random_state for train_test_split to a value of your choice. This ensures results are consistent.
- Assign the train and testing splits to X_train, X_test, y_train, and y_test.
End of explanation
"""
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
"""
Explanation: Question 3 - Training and Testing
What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?
Hint: What could go wrong with not having a way to test your model?
Answer:
Having both training and testing subsets allows you to both train your model, and measure how well your model performs. If you didn't have a way to test your model, but rather used all of the data available to train it, then you wouldn't have a way to check if your model is over-fitting the data, and you wouldn't have a true estimate of its performance.
Analyzing Model Performance
In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing 'max_depth' parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.
Learning Curves
The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination.
Run the code cell below and use these graphs to answer the following question.
End of explanation
"""
vs.ModelComplexity(X_train, y_train)
"""
Explanation: Question 4 - Learning the Data
Choose one of the graphs above and state the maximum depth for the model. What happens to the score of the training curve as more training points are added? What about the testing curve? Would having more training points benefit the model?
Hint: Are the learning curves converging to particular scores?
Answer:
I have chosen the graph of the model with a maximum depth of 10. The training score decreases as more points are added. The testing curve sharply increases until about 50 points, but as more points are added, the magnitude of this increase diminishes. Adding more training points would not benefit this model, as it appears to have learned all that it could by around 250 training points.
Complexity Curves
The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the learning curves, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the performance_metric function.
Run the code cell below and use this graph to answer the following two questions.
End of explanation
"""
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import make_scorer
from sklearn.grid_search import GridSearchCV
def fit_model(X, y):
""" Performs grid search over the 'max_depth' parameter for a
decision tree regressor trained on the input data [X, y]. """
# Create cross-validation sets from the training data
cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
# TODO: Create a decision tree regressor object
regressor = DecisionTreeRegressor()
# TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10
params = {'max_depth': [1,2,3,4,5,6,7,8,9,10]}
# TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'
scoring_fnc = make_scorer(performance_metric)
# TODO: Create the grid search object
grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)
# Fit the grid search object to the data to compute the optimal model
grid = grid.fit(X, y)
# Return the optimal model after fitting the data
return grid.best_estimator_
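# Hedged illustration (not part of the project template): k-fold cross-validation,
# discussed in Question 8 below, splits the sample indices into K bins and uses each
# bin exactly once as the validation fold.
from sklearn.cross_validation import KFold
for fold, (train_idx, valid_idx) in enumerate(KFold(10, n_folds=5)):
    print "Fold {}: train on {}, validate on {}".format(fold, train_idx, valid_idx)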
"""
Explanation: Question 5 - Bias-Variance Tradeoff
When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?
Hint: How do you know when a model is suffering from high bias or high variance?
Answer:
When the model is trained with a maximum depth of 1, the model suffers from high bias, since both the training and test scores are very low. This means that the model is not complex enough to learn general trends. With a maximum depth of 10, the model suffers from high variance. This is evident by the high accuracy of the training data, but the low accuracy of the test data. It appears that the model is too complex, over-fitting the data. The trends it finds are too specific. It is not able to generalize the trends, so when new information is presented, it is not able to predict the target.
Question 6 - Best-Guess Optimal Model
Which maximum depth do you think results in a model that best generalizes to unseen data? What intuition lead you to this answer?
Answer:
It looks like a maximum depth of 3 provides the best generalization of the unseen data. I say this because of three observations:
1. With a maximum depth of 3, both the training and test learning curves tightly converge on a high score with increased training points.
2. The test complexity curve begins to peak at around 3 or 4, and then starts to head downward again.
3. The uncertainty for a complexity of 3 is better than that of 4, which is why I chose 3 over 4. With less uncertainty, the R<sup>2</sup> is better, meaning the model fits the data better.
Evaluating Model Performance
In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from fit_model.
Question 7 - Grid Search
What is the grid search technique and how it can be applied to optimize a learning algorithm?
Answer:
The grid search technique involves sweeping over a defined set of parameters, each with a set of values to try. The algorithm trains the model with each combination of parameters and outputs the parameter set with the best cross-validation score.
Question 8 - Cross-Validation
What is the k-fold cross-validation training technique? What benefit does this technique provide for grid search when optimizing a model?
Hint: Much like the reasoning behind having a testing set, what could go wrong with using grid search without a cross-validated set?
Answer:
The K-fold cross-validation training technique involves splitting a data set into K bins. The algorithm trains and tests the model K times, using each bin as the test data exactly once, and the remaining K-1 bins as the training data. The algorithm then averages the results over each train/test split. For grid search, using K-fold cross-validation ensures that you train over every aspect of the data, so that your search is not overly sensitive to any one train/test split. By this, I mean that there may be some inherent trend in a given training split that will favor a certain value for some parameter. This trend may not generalize to the entire data set. By using K-fold cross-validation, you test every parameter value on every piece of data, which minimizes this effect.
Implementation: Fitting a Model
Your final implementation requires that you bring everything together and train a model using the decision tree algorithm. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the 'max_depth' parameter for the decision tree. The 'max_depth' parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called supervised learning algorithms.
For the fit_model function in the code cell below, you will need to implement the following:
- Use DecisionTreeRegressor from sklearn.tree to create a decision tree regressor object.
- Assign this object to the 'regressor' variable.
- Create a dictionary for 'max_depth' with the values from 1 to 10, and assign this to the 'params' variable.
- Use make_scorer from sklearn.metrics to create a scoring function object.
- Pass the performance_metric function as a parameter to the object.
- Assign this scoring function to the 'scoring_fnc' variable.
- Use GridSearchCV from sklearn.grid_search to create a grid search object.
- Pass the variables 'regressor', 'params', 'scoring_fnc', and 'cv_sets' as parameters to the object.
- Assign the GridSearchCV object to the 'grid' variable.
End of explanation
"""
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
"""
Explanation: Making Predictions
Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a decision tree regressor, the model has learned what the best questions to ask about the input data are, and can respond with a prediction for the target variable. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.
Question 9 - Optimal Model
What maximum depth does the optimal model have? How does this result compare to your guess in Question 6?
Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.
End of explanation
"""
# Produce a matrix for client data
client_data = [[5, 17, 15], # Client 1
[4, 32, 22], # Client 2
[8, 3, 12]] # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
"""
Explanation: Answer:
The optimal maximum depth for the decision tree model is 4. I considered this as an option in question 6, but ultimately went with 3. However, the learning curve was not derived for a maximum depth of 4, which might have swayed my decision. In the end, this value agrees with the complexity curve calculated for the maximum depth parameter.
Question 10 - Predicting Selling Prices
Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:
| Feature | Client 1 | Client 2 | Client 3 |
| :---: | :---: | :---: | :---: |
| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
| Neighborhood poverty level (as %) | 17% | 32% | 3% |
| Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |
What price would you recommend each client sell his/her home at? Do these prices seem reasonable given the values for the respective features?
Hint: Use the statistics you calculated in the Data Exploration section to help justify your response.
Run the code block below to have your optimized model make predictions for each client's home.
End of explanation
"""
vs.PredictTrials(features, prices, fit_model, client_data)
"""
Explanation: Answer:
Predicted selling price for Client 1's home: \$408,240.00
Predicted selling price for Client 2's home: \$231,238.64
Predicted selling price for Client 3's home: \$929,345.45
These prices do fall within the range given by the data set. Client 1 and 2's prices fall within 2 standard deviations of the mean, but Client 3's price lies toward the outer edge of 3 standard deviations from the mean (i.e., it is close to the top 1% of home prices in the data set). However, it can be seen that Client 3's house has "good" values for all three metrics that we are looking at. The trends for all clients agree with what was predicted to be the case using 'LSTAT', 'RM', and 'PTRATIO'.
Sensitivity
An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. Run the code cell below to run the fit_model function ten times with different training and testing sets to see how the prediction for a specific client changes with the data it's trained on.
End of explanation
"""
| antoniomezzacapo/qiskit-tutorial | community/teach_me_qiskit_2018/w_state/W State 3 - Monty Hall Problem Solver.ipynb | apache-2.0 |
# useful additional packages
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import time
from pprint import pprint
# importing Qiskit
from qiskit import Aer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
IBMQ.load_accounts()
"""
Explanation: <img src="../../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
A Monty Hall Problem Solver
The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.
For more information about how to use the IBM Q experience (QX), consult the tutorials, or check out the community.
Contributors
Pierre Decoodt, Université Libre de Bruxelles
End of explanation
"""
# Can be very slow when the number of shots increases
backend = Aer.get_backend('qasm_simulator')
# using IBMQ
# backend = IBMQ.get_backend('ibmq_qasm_simulator')
print("Your choice for the backend is: ", backend.name())
"""
Explanation: Introduction
The Monty Hall problem has been approached by various formulations related to quantum game theory (notably Flitney and Abbott 2002, D'Ariano et al. 2002). It is no wonder that Alice and Bob now come on stage, the first as the game master, the second as the player.
In this tutorial, a quantum model of the Monty Hall game adapted for the ibmqx5 architecture will be built in three phases.
To begin, a circuit comprising two sets of three qubits entangled in a W state will be used to materialize the actions of Alice and Bob and tested in a simulation of the game.
In a second step, three Toffoli gates will be added to this circuit to establish a solution in which measuring a single qubit over a sufficient number of runs is enough to determine Bob's chances of winning, according to whether he prefers to stick with his first choice or switch.
In a final step, Bob's mood for sticking or switching will gradually change from one experiment to the next to see how this can modify his probability of winning.
Let's start by choosing the backend. A simulator is preferable during the first tests.
End of explanation
"""
# Define a F_gate
def F_gate(circ,q,i,j,n,k) :
theta = np.arccos(np.sqrt(1/(n-k+1)))
circ.ry(-theta,q[j])
circ.cz(q[i],q[j])
circ.ry(theta,q[j])
circ.barrier(q[i])
# Define the cxrv gate which uses reverse CNOT instead of CNOT
def cxrv(circ,q,i,j) :
circ.h(q[i])
circ.h(q[j])
circ.cx(q[j],q[i])
circ.h(q[i])
circ.h(q[j])
circ.barrier(q[i],q[j])
# create quantum register
q = QuantumRegister(16)
# create classical register
c = ClassicalRegister(16)
# create quantim circuit
twin = QuantumCircuit(q, c)
# First W state
twin.x(q[14])
F_gate(twin,q,14,3,3,1)
F_gate(twin,q,3,2,3,2)
twin.cx(q[3],q[14])
twin.cx(q[2],q[3])
#Second W state
twin.x(q[12])
F_gate(twin,q,12,5,3,1)
F_gate(twin,q,5,6,3,2)
cxrv(twin,q,5,12)
twin.cx(q[6],q[5])
#Coin tossing
twin.h(q[0])
twin.h(q[1])
switch1 = QuantumCircuit(q, c, name='switch1')
#Stick or switch
switch1.h(q[13])
for i in range (4) :
switch1.measure(q[i] , c[i]);
for i in range (5,7) :
switch1.measure(q[i] , c[i]);
for i in range (12,15) :
switch1.measure(q[i] , c[i]);
"""
Explanation: The game circuit
The first circuit presented here first establishes two W states with the help of the $F$ gates described in the tutorial "W State 1 - Multi-Qubit Systems". These W states serve as true random number generators.
The first W state, involving the qubits $q_l$, $q_c$ and $q_r$, represents Alice's choice of the door behind which to hide the car (in the following two equations, the subscripts $l$, $c$ and $r$ represent respectively the left, center and right door):
$$ |W \rangle \:=\:\frac{1}{\sqrt{3}} \: (|1_l 0_c 0_r \rangle \: + \: |0_l 1_c 0_r\rangle \: +\: |0_l 0_c 1_r\rangle) $$
The second W state, involving the qubits $q_l^{*}$, $q_c^{*}$ and $q_r^{*}$, represents Bob's initial choice regarding the door behind which the car would hide:
$$ |W^{*} \rangle \:=\: \frac{1}{\sqrt{3}} \: (|1_{l}^{*} 0_{c}^{*} 0_{r}^{*} \rangle \: + \: |0_{l}^{*} 1_{c}^{*} 0_{r}^{*}\rangle \: +\: |0_{l}^{*} 0_{c}^{*} 1_{r}^{*}\rangle) $$
A second true random number generator uses a two-qubit state to simulate coin tossing:
$$ H^{\otimes 2}|0_{a}0_{b}\rangle=|+_{a}\rangle \:|+_{b}\rangle=\frac{|0_{a}\rangle|0_{b}\rangle+|0_{a}\rangle|1_{b}\rangle+|1_{a}\rangle|0_{b}\rangle+|1_{a}\rangle|1_{b}\rangle}{2}$$
As explained in the tutorial "W State 2 - Let's Make a Deal", its role is to simulate Alice's choice of the door to open between two doors hiding a goat, when necessary.
A last circuit establishes a flag $q_s$ representing Bob's decision to stick with his initial choice (ground state) or to switch (excited state). A Hadamard gate applied to $q_s$ ensures the equiprobability of these possibilities.
End of explanation
"""
# create AliceBob circuit
AliceBob = twin+switch1
Label = ["left", "central", "right"]
wstates = 0
while wstates != 1:
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print("Alice vs Bob", "backend=", backend, "starting time", time_exp)
result = execute(AliceBob, backend=backend, shots=1)
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print("Alice vs Bob", "backend=", backend, " end time", time_exp)
cstr = str(result.result().get_counts(AliceBob))
nb_of_cars = int(cstr[3]) + int(cstr[14]) + int(cstr[15])
nb_of_doors = int(cstr[12]) + int(cstr[11]) + int(cstr[5])
wstates = nb_of_cars * nb_of_doors
print(" ")
print('Alice: One car and two goats are now hidden behind these doors.')
print(' Which door do you choose?')
print(" ")
"Chosing the left door"
if int(cstr[5]) == 1:
Doorchosen = 1
"Chosing the center door"
if int(cstr[11]) == 1:
Doorchosen = 2
"Chosing the right door"
if int(cstr[12]) == 1:
Doorchosen = 3
time.sleep(2)
print('Bob: My choice is the',Label[Doorchosen-1], "door")
print(" ")
randomnb = (int(cstr[16]) + int(cstr[17])) % 2  # combine the two coin qubits into a single random bit
if cstr[3] == "1": #car behind left door
Doorwinning = 1
if Doorchosen == 1:
Dooropen = 2 + randomnb
Doorswitch = 3 - randomnb
if Doorchosen == 2:
Dooropen = 3
Doorswitch = 1
if Doorchosen == 3:
Dooropen = 2
Doorswitch = 1
if cstr[14] == "1": #car behind central door
Doorwinning = 2
if Doorchosen == 2:
Dooropen = 1 + 2*randomnb
Doorswitch = 3 - 2*randomnb
if Doorchosen == 1:
Dooropen = 3
Doorswitch = 2
if Doorchosen == 3:
Dooropen = 1
Doorswitch = 2
if cstr[15] == "1": #car behind right door
Doorwinning = 3
if Doorchosen == 3:
Dooropen = randomnb + 1
Doorswitch = 2 - randomnb
if Doorchosen == 1:
Dooropen = 2
Doorswitch = 3
if Doorchosen == 2:
Dooropen = 1
Doorswitch = 3
time.sleep(2)
print('Alice: Now I open the', Label[Dooropen-1], 'door and you see a goat')
time.sleep(2)
print(' You get an opportunity to change your choice!')
time.sleep(2)
print(' Do you want to switch for the',Label[Doorswitch-1], "door?")
print(" ")
time.sleep(2)
switch_flag = int(cstr[4])
"BOB STICKS WITH HIS FIRST CHOICE!"
if switch_flag == 0:
Doorfinal = Doorchosen
print('Bob: I stick with my first choice, the',Label[Doorfinal-1], "door")
"BOB CHANGES HIS MIND!"
if switch_flag == 1:
Doorfinal = Doorswitch
print('Bob: I change my mind and choose the',Label[Doorfinal-1], "door")
"FINAL ANNOUNCE"
if Doorfinal == Doorwinning:
endmessage = 'won the car! Congratulations!'
else:
endmessage = 'won a goat! Sorry!'
time.sleep(2)
print()
print('Alice: You opened the',Label[Doorfinal-1],'door and', endmessage)
print("Game over")
"""
Explanation: Alice and Bob playing
Now we're going to watch Alice and Bob in their re-enactment of the TV show "Let's Make a Deal".
Among the possible scenarios, the one selected is obtained by a single-shot execution of the combination of the two circuits described above.
Note that a virtual referee checks that one and only one car has been hidden by Alice and that Bob first chooses one and only one door. The program is rerun until a consistent result is achieved. This verification is only useful on a real quantum computer.
From the result of the measurements of the nine qubits involved, the match is reconstituted on a conventional computer.
End of explanation
"""
#Toffoli gates
Toffoli = QuantumCircuit(q, c, name='Toffoli')
Toffoli.ccx(q[3], q[5], q[4])
Toffoli.swap(q[2],q[3])
Toffoli.swap(q[6],q[5])
Toffoli.ccx(q[3], q[5], q[4])
Toffoli.swap(q[3],q[14])
Toffoli.swap(q[12],q[5])
Toffoli.ccx(q[3], q[5], q[4])
"""
Explanation: Toffoli is called on
One could claim that during this match the quantum computer plays against itself. However, it should be noted that, in the initial phase, it could have been used repetitively rather than in a single run, for the successive determination of $|W \rangle$, $|W^{*} \rangle$, coin tossing and the switch flag.
Similarly, two quantum computers such as ibmqx2 and ibmqx4 could have been used, one attributed to Alice and the other to Bob.
To take the step towards the quantum computer playing against itself autonomously and keeping track of the result, we need a circuit extension.
Here we use three Toffoli gates that share a common target qubit $q_w$. When $q_w$ is in the excited state, it indicates Bob's victory.
The description of a Toffoli $C3$ gate is available in the "Quantum gates and linear algebra" Jupyter notebook of the Qiskit tutorial.
The following three operations are performed$^1$:
Left door: $$C3\;(q_{l},\;q^{*}_{l},\;q_w)$$
Central door: $$C3\;(q_{c},\;q^{*}_{c},\;q_w)$$
Right door: $$C3\;(q_{r},\;q^{*}_{r},\;q_w)$$
The end result is that $q_w$ is in the excited state iff $q_{l}=q^{*}_{l}=1$ or $q_{c}=q^{*}_{c}=1$ or $q_{r}=q^{*}_{r}=1$, i.e. iff the door initially chosen by Bob is the one hiding the car.
$^1$ The order of these operations does not matter but is dictated by the architecture of the quantum computer and the $SWAP$ gates needed.
End of explanation
"""
# A general solution with 50% switching strategy
switch_fifty_percent =QuantumCircuit(q,c, name='switch_fifty_percent')
#switch flag
switch_fifty_percent.h(q[13])
switch_fifty_percent.cx(q[13],q[4])
switch_fifty_percent.measure(q[4] , c[4]);
switch_fifty_percent.measure(q[13] , c[13]);
general_solution = twin+Toffoli+switch_fifty_percent
shots = 1024
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print(backend, "shots", shots, "starting time", time_exp)
result = execute(general_solution, backend=backend, shots=shots)
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print(backend, "shots", shots, "end time", time_exp)
plot_histogram(result.result().get_counts(general_solution))
print(result.result().get_counts(general_solution))
observable_stickwon = {'0000000000010000': 1, '0010000000010000': 0, '0010000000000000': 0, '0000000000000000': 0}
observable_switchwon = {'0000000000010000': 0, '0010000000010000': 1, '0010000000000000': 0, '0000000000000000': 0}
observable_stickall = {'0000000000010000': 1, '0010000000010000': 0, '0010000000000000': 0, '0000000000000000': 1}
observable_switchall = {'0000000000010000': 0, '0010000000010000': 1, '0010000000000000': 1, '0000000000000000': 0}
stickwon = result.result().average_data(general_solution,observable_stickwon)
switchwon = result.result().average_data(general_solution,observable_switchwon)
stickall = result.result().average_data(general_solution,observable_stickall)
switchall = result.result().average_data(general_solution,observable_switchall)
print("Proportion sticking: %6.2f " % stickall)
print("Proportion switching: %6.2f " % switchall)
stickwon_stickall = stickwon/stickall
switchwon_switchall = switchwon/switchall
print("Proportion winning when sticking: %6.2f " % stickwon_stickall)
print("Proportion winning when switching: %6.2f " % switchwon_switchall)
"""
Explanation: A general solution where chance leads the game
The addition of the $C3$ gates and a last $cNOT$ gate with $q_s$ as control and $q_w$ as target makes it possible to determine experimentally the probability of winning for the systematic sticking and systematic switching strategies. It suffices to measure $q_s$ and $q_w$ over a sufficient number of shots.
Indeed, for Bob, switching is enough to turn a defeat into a victory and vice versa.
Let's look at the results of this experiment.
End of explanation
"""
# Illustrating different strategies
xdat = []
ydat = []
observable = {'0000000000000000': 0, '0000000000010000': 1}
shots = 1024
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print(backend, "shots", shots, "starting time", time_exp)
for i in range(9) :
strategies = QuantumCircuit(q, c, name='strategies')
Prob = i/8
lambda_s = 2*np.arcsin(np.sqrt(Prob))
strategies.rx(lambda_s,q[13])
strategies.cx(q[13],q[4])
strategies.measure(q[4] , c[4]);
statploti = "statplot"+str(i)
statploti = QuantumCircuit(q,c, name=statploti)
statploti = twin+Toffoli+strategies
result = execute(statploti, backend=backend, shots=shots)
loop_average=(result.result().average_data(statploti,observable))
print(statploti," Proportion switching: %6.3f" % Prob, " Proportion winning: %6.2f" % loop_average)
ydat.append(loop_average)
xdat.append(Prob)
time_exp = time.strftime('%d/%m/%Y %H:%M:%S')
print(backend, "shots", shots, "end time", time_exp)
plt.plot(xdat, ydat, 'ro')
plt.grid()
plt.ylabel('Probability of Winning', fontsize=12)
plt.xlabel(r'Probability of Switching', fontsize=12)
plt.show()
print("Our Advice: \n")
y_aver = []
for j in range(0,7,3) :
y_aver.append((ydat[j] + ydat[j+1] +ydat[j+2])/3)
if y_aver[0] == max(y_aver) :
print(" Thou Shalt Not Switch")
elif y_aver[2] == max(y_aver) :
print(" Thou Shalt Not Stick")
else:
print(" Just follow the intuition of the moment")
"""
Explanation: The Mood of Bob
After this experimental demonstration of the superiority of systematic switching over systematic sticking, in agreement with a theory that is highly counterintuitive but certainly mathematically correct and generally accepted, one could consider that all is said and done.
However, it has not been experimentally demonstrated in this tutorial that there is no better strategy somewhere between these two extremes. In order to reach a final conclusion, a series of experiments will follow, in which a gradual variation of the switch probability $P(s)$ is created. This equates to a gradual change in Bob's mood, from a propensity to stick with his first choice to an impetuous need to change his mind when given the opportunity.
To do this, the qubit $q_s$ undergoes an incremental rotation $R_x(\lambda)$ around the X-axis, according to the formula:
$$\lambda = 2\arcsin \sqrt{P(s)}$$
Since the circuit is completed by a $cNOT$ gate with $q_s$ as control and $q_w$ as target, the measurement of $q_w$ alone is sufficient to estimate the probability that Bob will win for a given propensity to switch.
Here are the results of nine experiments with incremental values of $P(s)$, each with a sufficient number of shots.
End of explanation
"""
| Almaz-KG/MachineLearning | ml-for-finance/python-for-financial-analysis-and-algorithmic-trading/02-NumPy/3-Numpy-Operations.ipynb | apache-2.0 |
import numpy as np
arr = np.arange(0,10)
arr + arr
arr * arr
arr - arr
# Warning on division by zero, but not an error!
# Just replaced with nan
arr/arr
# Also warning, but not an error instead infinity
1/arr
arr**3
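# Scalar-with-array arithmetic (mentioned in the explanation below): the scalar is
# broadcast across every element of the array.
arr + 100
arr * 2
arr - 5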
"""
Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
<center>Copyright Pierian Data 2017</center>
<center>For more information, visit us at www.pieriandata.com</center>
NumPy Operations
Arithmetic
You can easily perform array with array arithmetic, or scalar with array arithmetic. Let's see some examples:
End of explanation
"""
#Taking Square Roots
np.sqrt(arr)
#Calcualting exponential (e^)
np.exp(arr)
np.max(arr) #same as arr.max()
np.sin(arr)
np.log(arr)
"""
Explanation: Universal Array Functions
Numpy comes with many universal array functions, which are essentially just mathematical operations you can use to perform the operation across the array. Let's show some common ones:
End of explanation
"""
| satishgoda/learning | python/libs/rxpy/GettingStarted.ipynb | mit |
%%bash
pip install rx
"""
Explanation: Getting Started with RxPY
ReactiveX, or Rx for short, is an API for programming with observable event streams. RxPY is a port of ReactiveX to Python. Learning Rx with Python is particularly interesting since Python removes much of the clutter that comes with statically typed languages. RxPY works with both Python 2 and Python 3, but all examples in this tutorial use Python 3.4.
Rx is about processing streams of events. With Rx you:
Tell what you want to process (Observable)
How you want to process it (A composition of operators)
What you want to do with the result (Observer)
It's important to understand that with Rx you describe what you want to do with events if and when they arrive. It's all a declarative composition of operators that will do some processing of the events when they arrive. If nothing happens, then nothing is processed.
Thus the pattern is that you subscribe to an Observable using an Observer:
python
subscription = Observable.subscribe(observer)
NOTE: Observables are not active in themselves. They need to be subscribed to make something happen. Simply having an Observable lying around doesn't make anything happen.
Install
Use pip to install RxPY:
End of explanation
"""
import rx
from rx import Observable, Observer
"""
Explanation: Importing the Rx module
End of explanation
"""
class MyObserver(Observer):
def on_next(self, x):
print("Got: %s" % x)
def on_error(self, e):
print("Got error: %s" % e)
def on_completed(self):
print("Sequence completed")
xs = Observable.from_iterable(range(10))
d = xs.subscribe(MyObserver())
xs = Observable.from_(range(10))
d = xs.subscribe(print)
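# A hedged sketch (assuming the keyword-callback form of subscribe described in the
# note below): pass separate handlers for on_next, on_error and on_completed.
d = Observable.from_(range(3)).subscribe(
    on_next=lambda x: print("on_next: %s" % x),
    on_error=lambda e: print("on_error: %s" % e),
    on_completed=lambda: print("on_completed"))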
"""
Explanation: Generating a sequence
There are many ways to generate a sequence of events. The easiest way to get started is to use the from_iterable() operator, which is also called just from_. Other operators you may use to generate a sequence include just, generate, create and range.
End of explanation
"""
xs = Observable.from_(range(10))
d = xs.filter(
lambda x: x % 2
).subscribe(print)
"""
Explanation: NOTE: The subscribe method takes an observer, or one to three callbacks for handling on_next(), on_error(), and on_completed(). This is why we can use print directly as the observer in the example above, since it becomes the on_next() handler for an anonymous observer.
Filtering a sequence
End of explanation
"""
xs = Observable.from_(range(10))
d = xs.map(
lambda x: x * 2
).subscribe(print)
"""
Explanation: Transforming a sequence
End of explanation
"""
xs = Observable.from_(range(10, 20, 2))
d = xs.map(
lambda x, i: "%s: %s" % (i, x * 2)
).subscribe(print)
"""
Explanation: NOTE: You can also take an index as the second parameter to the mapper function:
End of explanation
"""
xs = Observable.range(1, 5)
ys = Observable.from_("abcde")
zs = xs.merge(ys).subscribe(print)
"""
Explanation: Merge
Merging two observable sequences into a single observable sequence using the merge operator:
End of explanation
"""
from rx.testing import marbles
xs = Observable.from_marbles("a-b-c-|")
xs.to_blocking().to_marbles()
"""
Explanation: The Spacetime of Rx
In the examples above all the events happen at the same moment in time. The events are only separated by ordering. This confuses many newcomers to Rx since the result of the merge operation above may have several valid results such as:
a1b2c3d4e5
1a2b3c4d5e
ab12cd34e5
abcde12345
The only guarantee you have is that 1 will be before 2 in xs, but 1 in xs can be before or after a in ys. It's up to the sort stability of the scheduler to decide which event should go first. For real time data streams this will not be a problem since the events will be separated by actual time. To make sure you get the results you "expect", it's always a good idea to add some time between the events when playing with Rx.
Marbles and Marble Diagrams
As we saw in the previous section it's nice to add some time when playing with Rx and RxPY. A great way to explore RxPY is to use the marbles test module that enables us to play with marble diagrams. The marbles module adds two new extension methods to Observable. The methods are from_marbles() and to_marbles().
Examples:
1. res = rx.Observable.from_marbles("1-2-3-|")
2. res = rx.Observable.from_marbles("1-2-3-x", rx.Scheduler.timeout)
The marble string consists of some special characters:
- = Timespan of 100 ms
x = on_error()
| = on_completed()
All other characters are treated as an on_next() event at the given moment they are found in the string. If you need to represent multi-character values, you can group them with parentheses, such as "1-(42)-3".
Lets try it out:
End of explanation
"""
xs = Observable.from_marbles("1-2-3-x-5")
ys = Observable.from_marbles("1-2-3-4-5")
xs.merge(ys).to_blocking().to_marbles()
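# A hedged sketch of the grouping syntax mentioned above: parentheses turn a
# multi-character value such as 42 into a single on_next() event.
zs = Observable.from_marbles("1-(42)-3-|")
zs.to_blocking().to_marbles()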
"""
Explanation: It's now easy to also add errors into the event stream by inserting x into the marble string:
End of explanation
"""
from rx.subjects import Subject
stream = Subject()
stream.on_next(41)
d = stream.subscribe(lambda x: print("Got: %s" % x))
stream.on_next(42)
d.dispose()
stream.on_next(43)
"""
Explanation: Subjects and Streams
A simple way to create an observable stream is to use a subject. It's probably called a subject after the Subject-Observer pattern described in the Design Patterns book by the gang of four (GOF).
Anyway, a Subject is both an Observable and an Observer, so you can both subscribe to it and on_next it with events. This makes it an obvious candidate if need to publish values into an observable stream for processing:
End of explanation
"""
| RogueAstro/keppy | docs/examples/HIP67620_example.ipynb | mit |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import astropy.units as u
from radial import estimate, dataset
%matplotlib inline
"""
Explanation: The orbital parameters of the binary solar twin HIP 67620
radial is a simple program designed to do a not very trivial task: simulate radial velocities of a star orbited by a massive object or "reverse engineer" radial velocity measurements to estimate the orbital parameters of the system being studied. The formalism behind it is based on https://arxiv.org/abs/1009.1738.
Our objective in this notebook is to use radial velocity data of the solar twin HIP 67620 to estimate the projected mass, separation and other orbital parameters of its companion. We start by importing the necessary packages. Notice that we will specifically import the modules estimate and dataset from the radial package.
End of explanation
"""
harps = dataset.RVDataSet(file='../../tests/HIP67620_HARPS.dat', # File name
t_offset=-2.45E6, # Time offset (units of days)
rv_offset='subtract_mean', # RV offset
instrument_name='HARPS',
target_name='HIP 67620',
skiprows=1, # Number of rows to skip in the data file
t_col=5, # Column corresponding to time in the data file
rv_col=6, # Column corresponding to RVs
rv_unc_col=7 # Column corresponding to RV ucnertainties
)
aat = dataset.RVDataSet(file='../../tests/HIP67620_AAT.dat', t_offset=-2.45E6, rv_offset='subtract_mean',
instrument_name='AATPS', target_name='HIP 67620', delimiter=',')
w16 = dataset.RVDataSet(file='../../tests/HIP67620_WF16.dat', t_offset=-5E4, rv_offset='subtract_mean',
instrument_name='W16', target_name='HIP 67620', t_col=1,
rv_col=3, rv_unc_col=4)
"""
Explanation: We then extract the data from the text files located in the tests folder. They will be stored in RVDataSet objects, which are defined in the dataset module.
End of explanation
"""
w16.plot()
"""
Explanation: We can visualize the radial velocities by running the function plot() of a given dataset object. For instance:
End of explanation
"""
# guess is a dictionary, which is a special type of "list" in python
# Instead of being indexed by a number, the items in a dictionary
# are indexed by a key (which is a string)
guess = {'k': 6000,
'period': 4000,
't0': 5000,
'omega': 180 * np.pi / 180,
'ecc': 0.3,
'gamma_0': 0,
'gamma_1': 0,
'gamma_2': 0}
"""
Explanation: Now that we have the data, how do we estimate the orbital parameters of the system? We use the methods and functions inside the estimate module. But first, we need to provide an initial guess for the orbital parameters. They are:
k: radial velocity semi-amplitude $K$ (in m/s)
period: orbital period $T$ (in days)
t0: time of periastron passage $t_0$ (in days)
omega: argument of periapse $\omega$ (in radians)
ecc: eccentricity of the orbit $e$
gamma_X: RV offset $\gamma$ of the dataset number $X$ (in m/s)
A first guess is usually an educated guess based on either a periodogram and/or simple visual inspection of the data.
End of explanation
"""
estim = estimate.FullOrbit(datasets=[w16],
guess=guess,
parametrization='mc10')
plot = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=2)
plt.show()
"""
Explanation: Now we need to instantiate a FullOrbit object with the datasets and our guess, as well as the parametrization option we want to use. Then, we plot it.
End of explanation
"""
result = estim.lmfit_orbit(update_guess=True)
"""
Explanation: We estimate the orbital parameters of the system using the Nelder-Mead optimization algorithm implemented in the lmfit package. This will compute the best solution or, in other words, the one that minimizes the residuals of the fit.
It is probable that the first solutions are not good, and that is fine. Just run the estimation a couple of times until you get a satisfactory result.
End of explanation
"""
pylab.rcParams['font.size'] = 12
fig, gs = estim.plot_rvs(plot_guess=True, fold=False, legend_loc=4)
"""
Explanation: Now let's plot the solution we obtained.
End of explanation
"""
estim.emcee_orbit(nwalkers=12, nsteps=1000, nthreads=4)
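# Illustrative check of the rule of thumb above: the recommended minimum number of
# walkers is at least twice the number of free parameters in the guess dictionary.
print('Free parameters: %d -> suggested minimum number of walkers: %d'
      % (len(guess), 2*len(guess)))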
"""
Explanation: If the result looks good, that is great: we have the best solution of the orbit. However, we still need to estimate uncertainties for the orbital parameters. We do that using emcee. This is a Markov-Chain Monte Carlo (MCMC) simulation, in which we simulate a bunch of sets of orbital parameters that could still fit the data given the uncertainties of the observations, but are a little bit off from the best solution. They will make up the uncertainties of the fit.
This simulation starts from the best solution and do random walks across the parameter space. We will provide the number of walkers (nwalkers) for the MCMC simulation, as well as the number of steps (nsteps) that each one will take.
How do we know the number of walkers and steps to use? As a general rule of thumb, it is recommended to use at least 2 times the number of parameters for the number of walkers, and as many steps as it takes for the simulation to converge.
Note: We can use multiprocessing in emcee to make the calculations somewhat faster. For that, we need to provide the number of processing threads (in the parameter nthreads) of your computer. Most laptops have 2 or 4 threads.
End of explanation
"""
estim.plot_emcee_sampler()
"""
Explanation: With that done, we plot the walkers to see how the simulation went.
End of explanation
"""
estim.make_chains(500)
"""
Explanation: Let's cut the beginning of the simulation (the first 500 steps) because they correspond to the burn-in phase.
End of explanation
"""
fig = estim.plot_corner()
plt.show()
"""
Explanation: Now we use a corner plot to analyze the posterior distributions of the parameters, as well as the correlations between them.
End of explanation
"""
estim.print_emcee_result(main_star_mass=0.954, # in M_sol units
mass_sigma=0.006)
"""
Explanation: And that should be pretty much it. Finally, we compute the orbital parameters in a human-readable fashion.
End of explanation
"""
| jgdwyer/ML-convection | NN_demo.ipynb | apache-2.0 |
from IPython.display import Image
Image('assets/Stephens_and_Bony_2013.png')
"""
Explanation: Using a neural network to emulate the atmospheric convection scheme in a global climate model
(John Dwyer & Paul O'Gorman)
Overview:
Global climate models (GCMs) solve computational fluid PDEs to represent the dynamics and thermodynamics of the atmosphere and ocean. They are important for predicting climate change given some amount of greenhouse gas emissions.
Problem:
State-of-the-art climate models have a horizontal grid resolution of 100 km x 100 km, far too coarse to represent many physical processes directly. Some approximations, like those for atmospheric convection and cloud formation, are not very accurate, and lead to large uncertainty in future predictions.
End of explanation
"""
# First load packages
import numpy as np
from netCDF4 import Dataset
import matplotlib as mpl
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import os
import time
import warnings  # used by loaddata below when more samples are requested than available
from sklearn import preprocessing, metrics
from importlib import reload
import scipy.stats
import sknn.mlp
import pickle
%matplotlib inline
pylab.rcParams['figure.figsize'] = (10, 6)
inline_rc = dict(mpl.rcParams)
import src.nnload as nnload
import src.nntrain as nntrain
import src.nnplot as nnplot
"""
Explanation: Solution:
Use machine learning algorithms! Train a neural network to emulate atmospheric convection from observations or very high-resolution (non-global) climate models. Then put the trained algorithm back into the climate model.
My approach:
As a first step, I check to make sure that this approach can work. I train a neural network to learn the atmospheric convection scheme in the global model. Then put the trained scheme back into the model and see if it can replicate the original (physics-based) scheme.
End of explanation
"""
def loaddata(filename, minlev, all_lats=True, indlat=None,
N_trn_examples=None, rainonly=False, noshallow=False,
verbose=True):
"""v2 of the script to load data. See prep_convection_output.py for how
the input filename is generated.
Args:
filename: The file to be loaded. Use convection_50day.pkl or
convection_50day_validation.pkl
minlev: The topmost model level for which to load data. Set to 0. to
load all data
all_lats: Logical value for whether to load data from all latitudes
indlat: If all_lats is false, give the index value [0-31] for the
latitude at which to load data.
rainonly: If true, only return training examples of when it is raining
noshallow: If true, only return training examples of when the shallow
convection scheme does NOT happen. (So, only return examples
with deep convection, or no convection at all)
verbose: If true, prints some basic stats about training set
Returns:
x : 2-d numpy array of input features (m_training examples x
N_input features). If minlev is 0., there will be 60 input
features, the top 30 for temperature and the bottom 30 for
humidity.
        y : 2-d numpy array of output targets (m_training examples x
            N_output targets). If minlev is 0., there will be 60 output
            features, the top 30 for temp. tendencies and the bottom 30
            for q tend.
cv : 1-d array (m_training examples x 1) that gives 1 if convection
occurs and 0 if it does not.
        Pout : 1-d array (m_training examples x 1) of how much precipitation
            occurs in kg/m^2/s (multiply by 3600*24 to convert
            precipitation to mm/day)
lat2 : 1-d array of latitude for one hemisphere (since hemispheres
are combined)
lev : The vertical model levels (1 is the surface and 0 is the top
of the atmosphere).
dlev : The difference between model levels, useful for calculating
some derived quantities.
timestep: How large each model timestep is in seconds.
"""
v = dict()
[v['Tin'], v['qin'], v['Tout'], v['qout'], Pout, lat] = \
pickle.load(open(filename, 'rb'), encoding='latin1')
# Use this to calculate the real sigma levels
lev, dlev, indlev = nnload.get_levs(minlev)
    # Combine NH & SH data since they are statistically equivalent
varis = ['Tin', 'qin', 'Tout', 'qout']
for var in varis:
[v[var], lat2] = nnload.avg_hem(v[var], lat, axis=1)
# Change shape of data to be N_samp x N_lev
if all_lats:
v[var] = nnload.reshape_all_lats(v[var], indlev)
else:
if indlat is not None:
v[var] = nnload.reshape_one_lat(v[var], indlev, indlat)
# Pout = Pout[indlat,:]
else:
raise TypeError('Need to set an index value for indlat')
# Randomize the order of these events
m = v['Tin'].shape[0]
randind = np.random.permutation(m)
for var in varis:
v[var] = v[var][randind,:]
timestep = 10*60 # 10 minute timestep in seconds
# Converted heating rates to K/day and g/kg/day in prep_convection_output.py
# Concatenate input and output variables together
x = nnload.pack(v['Tin'], v['qin'] , axis=1)
y = nnload.pack(v['Tout'], v['qout'], axis=1)
Pout2 = nnplot.calc_precip(y, dlev)
# The outputs get lined up in prep_convection_output.py
# Print some statistics about rain and limit to when it's raining if True
x, y, Pout2 = nnload.limitrain(x, y, Pout2, rainonly, noshallow=noshallow,
verbose=verbose)
# Limit to only certain events if requested
if N_trn_examples is not None:
if N_trn_examples > y.shape[0]:
            warnings.warn('Requested more samples than available. Using the '
                          'maximum number available')
N_trn_examples = y.shape[0]
ind = np.arange(N_trn_examples)
x = x[ind,:]
y = y[ind,:]
        Pout2 = Pout2[ind]  # keep the precipitation array aligned with the selected examples
# Store when convection occurs
cv,_ = nnload.whenconvection(y, verbose=verbose)
return (x, y, cv, Pout2, lat2, lev, dlev, timestep)
x_orig, y_orig, cv, Pout, lat, lev, dlev, timestep = loaddata(
'./data/convection_50day.pkl', minlev=0.25,
all_lats=True, indlat=None, rainonly=False)
"""
Explanation: Load data for training and cross-validation
End of explanation
"""
def unpack(data,vari,axis=1):
    """Reverse pack operation to turn output matrix into T & q"""
N = int(data.shape[axis]/2)
varipos = {'T':np.arange(N),'q':np.arange(N,2*N)}
out = np.take(data,varipos[vari],axis=axis)
return out
def pack(d1,d2,axis=1):
"""Combines T & q profiles as an input matrix to NN"""
return np.concatenate((d1,d2), axis=axis)
# Initialize & fit scaler
def init_pp(ppi, raw_data):
"""Initialize list of scaler objects"""
if ppi['name'] == 'MinMax':
pp =[preprocessing.MinMaxScaler(feature_range=(-1.0,1.0)), # for temperature
preprocessing.MinMaxScaler(feature_range=(-1.0,1.0))] # and humidity
elif ppi['name'] == 'MaxAbs':
pp =[preprocessing.MaxAbsScaler(), # for temperature
preprocessing.MaxAbsScaler()] # and humidity
elif ppi['name'] == 'StandardScaler':
pp =[preprocessing.StandardScaler(), # for temperature
preprocessing.StandardScaler()] # and humidity
elif ppi['name'] == 'RobustScaler':
pp =[preprocessing.RobustScaler(), # for temperature
preprocessing.RobustScaler()] # and humidity
elif ppi['name'] == 'SimpleY':
        pp = [15., 10.]  # fixed scale factors for temperature and humidity
else:
        raise ValueError('Incorrect scaler name')
#Initialize scalers with data
if ppi['method'] == 'individually':
pp[0].fit(unpack(raw_data,'T'))
pp[1].fit(unpack(raw_data,'q'))
elif ppi['method'] == 'alltogether':
pp[0].fit(np.reshape(unpack(raw_data,'T'), (-1,1)))
pp[1].fit(np.reshape(unpack(raw_data,'q'), (-1,1)))
elif ppi['method'] == 'qTindividually':
if ppi['name'] != 'SimpleY':
pp = pp[0]
pp.fit(raw_data)
else:
raise ValueError('Incorrect scaler method')
return pp
# Transform data using initialized scaler
def transform_data(ppi, pp, raw_data):
"""Scales data before ML algorithm"""
if ppi['method'] == 'individually':
T_data = pp[0].transform(unpack(raw_data,'T'))
q_data = pp[1].transform(unpack(raw_data,'q'))
elif ppi['method'] == 'alltogether':
T_data = pp[0].transform(np.reshape(unpack(raw_data,'T'), (-1,1)))
q_data = pp[1].transform(np.reshape(unpack(raw_data,'q'), (-1,1)))
# Return to original shape (N_samples x N_features) rather than (N_s*N_f x 1)
shp = unpack(raw_data,'T').shape
T_data = np.reshape(T_data, shp)
q_data = np.reshape(q_data, shp)
elif ppi['method'] == 'qTindividually':
if ppi['name'] == 'SimpleY':
T_data = unpack(raw_data, 'T')/pp[0]
q_data = unpack(raw_data, 'q')/pp[1]
else:
all_data = pp.transform(raw_data)
T_data = unpack(all_data, 'T')
q_data = unpack(all_data, 'q')
else:
print('Given method is ' + ppi['method'])
raise ValueError('Incorrect scaler method')
# Return single transformed array as output
return pack(T_data, q_data)
# Apply inverse transformation to unscale data
def inverse_transform_data(ppi, pp, trans_data):
"""Reverse transform_data operation"""
if ppi['method'] == 'individually':
T_data = pp[0].inverse_transform(unpack(trans_data,'T'))
q_data = pp[1].inverse_transform(unpack(trans_data,'q'))
elif ppi['method'] == 'alltogether':
T_data = pp[0].inverse_transform(np.reshape(unpack(trans_data,'T'), (-1,1)))
q_data = pp[1].inverse_transform(np.reshape(unpack(trans_data,'q'), (-1,1)))
# Return to original shape (N_samples x N_features) rather than (N_s*N_f x 1)
shp = unpack(trans_data,'T').shape
T_data = np.reshape(T_data, shp)
q_data = np.reshape(q_data, shp)
elif ppi['method'] == 'qTindividually':
if ppi['name'] == 'SimpleY':
T_data = unpack(trans_data,'T') * pp[0]
q_data = unpack(trans_data,'q') * pp[1]
else:
all_data = pp.inverse_transform(trans_data)
T_data = unpack(all_data, 'T')
q_data = unpack(all_data, 'q')
else:
raise ValueError('Incorrect scaler method')
# Return single transformed array as output
return pack(T_data, q_data)
# Define preprocessing method to use
x_ppi={'name':'StandardScaler','method':'qTindividually'}
y_ppi={'name':'SimpleY' ,'method':'qTindividually'}
# Apply preprocessing to input data
x_pp = init_pp(x_ppi, x_orig)
x = transform_data(x_ppi, x_pp, x_orig)
# Apply preprocessing to output data
y_pp = init_pp(y_ppi, y_orig)
y = transform_data(y_ppi, y_pp, y_orig)
# Make preprocessor string for saving
pp_str = 'X-' + x_ppi['name'] + '-' + x_ppi['method'][:6] + '_'
pp_str = pp_str + 'Y-' + y_ppi['name'] + '-' + y_ppi['method'][:6] + '_'
"""
Explanation: Preprocess Data
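To make the scaling step concrete, here is a quick standalone illustration (with made-up numbers) of what sklearn's StandardScaler does: it removes the per-feature mean and divides by the per-feature standard deviation.
from sklearn import preprocessing
import numpy as np
demo = np.array([[250., 1.0],
                 [260., 2.0],
                 [270., 3.0]])
scaler = preprocessing.StandardScaler().fit(demo)
print(scaler.mean_)            # per-column means
print(scaler.transform(demo))  # zero-mean, unit-variance columns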
End of explanation
"""
def _plot_distribution(z, lat, lev, fig, ax, titlestr,
xl=None, xu=None, bins=None):
"""Plots a stack of histograms of log10(data) at all levels"""
# Initialize the bins and the frequency
num_bins = 100
if bins is None:
bins = np.linspace(np.amin(z), np.amax(z), num_bins+1)
n = np.zeros((num_bins, lev.size))
# Calculate distribution at each level
for i in range(lev.size):
n[:,i], _ = np.histogram(z[:,i], bins=bins)
bins1=bins[:-1]
# Take a logarithm and deal with case where we take log of 0
n = np.log10(n)
n_small = np.amin(n[np.isfinite(n)])
n[np.isinf(n)] = n_small
# Plot histogram
ca = ax.contourf(bins[:-1], lev, n.T)
ax.set_ylim(1,0)
if xl is not None:
ax.set_xlim(xl,xu)
plt.colorbar(ca, ax=ax)
ax.set_ylabel(r'$\sigma$')
ax.set_title(titlestr)
    xl, xr = ax.get_xlim()
return xl, xr, bins
# Show how preprocessing scales the input data
fig, ax = plt.subplots(2, 2)
_, _, _ = _plot_distribution(unpack(x_orig, 'T'), lat, lev, fig, ax[0,0],
'Temperature (unscaled) [K]')
_, _, _ = _plot_distribution(unpack(x, 'T'), lat, lev, fig, ax[0,1],
'Temperature (scaled) []')
_, _, _ = _plot_distribution(unpack(x_orig, 'q'), lat, lev, fig, ax[1,0],
'Humidity (unscaled) [g/kg]')
_, _, _ = _plot_distribution(unpack(x, 'q'), lat, lev, fig, ax[1,1],
'Humidity (scaled) []')
fig.suptitle('Distributions of Raw and Preprocessed Inputs', fontsize=20)
"""
Explanation: Show the input data and define a helper function for plotting distributions
End of explanation
"""
fig, ax = plt.subplots(2, 2)
_, _, _ = _plot_distribution(unpack(y_orig, 'T'), lat, lev, fig, ax[0,0],
'Temperature (unscaled) [K]')
_, _, _ = _plot_distribution(unpack(y, 'T'), lat, lev, fig, ax[0,1],
'Temperature (scaled) []')
_, _, _ = _plot_distribution(unpack(y_orig, 'q'), lat, lev, fig, ax[1,0],
'Humidity (unscaled) [g/kg]')
_, _, _ = _plot_distribution(unpack(y, 'q'), lat, lev, fig, ax[1,1],
'Humidity (scaled) []')
fig.suptitle('Distributions of Raw and Preprocessed Outputs', fontsize=20)
"""
Explanation: Preprocessing output values
End of explanation
"""
def store_stats(i, avg_train_error, best_train_error, avg_valid_error,
best_valid_error,**_):
if i==1:
global errors_stored
errors_stored = []
errors_stored.append((avg_train_error, best_train_error, avg_valid_error,
best_valid_error))
def build_nn(method, actv_fnc, hid_neur, learning_rule, pp_str,
batch_size=100, n_iter=None, n_stable=None,
learning_rate=0.01, learning_momentum=0.9,
regularize='L2', weight_decay=0.0, valid_size=0.5,
f_stable=.001):
"""Builds a multi-layer perceptron via the scikit neural network interface"""
# First build layers
layers = [sknn.mlp.Layer(f,units=h) for f,h in zip(actv_fnc,hid_neur)]
# Append a linear output layer
layers.append(sknn.mlp.Layer("Linear"))
mlp = sknn.mlp.Regressor(layers, n_iter=n_iter, batch_size=batch_size,
learning_rule=learning_rule, learning_rate=learning_rate,
learning_momentum=learning_momentum, regularize=regularize,
weight_decay=weight_decay, n_stable=n_stable, valid_size=valid_size,
f_stable=f_stable, callback={'on_epoch_finish': store_stats})
# Create a name for the neural network
# First build names of each layer
layerstr = '_'.join([str(h) + f[0] for h, f in zip(hid_neur, actv_fnc)])
# Get str of appropriate learning rate
if learning_rule == 'momentum':
lrn_str = str(learning_momentum)
else:
lrn_str = str(learning_rate)
# Construct name
mlp_str = pp_str + method[0] + "_" + layerstr + "_" + \
learning_rule[0:3] + lrn_str
# If using regularization, add that to the name too
if weight_decay > 0.0:
mlp_str = mlp_str + 'reg' + str(weight_decay)
# Add the number of iterations too
mlp_str = mlp_str + '_Niter' + str(n_iter)
return mlp, mlp_str
"""
Explanation: Build Neural Network
Functions to build NN
End of explanation
"""
actv_fnc = ['Rectifier', 'Rectifier']
hid_neur = [50, 25]
learning_rule='momentum'
n_iter = 100
r_mlp, r_str = build_nn('regress', actv_fnc, hid_neur, learning_rule,
pp_str, n_iter=n_iter, learning_momentum=0.9,
regularize='L2', weight_decay=1e-4)
"""
Explanation: Build the NN
End of explanation
"""
def train_nn(mlp,mlp_str,x,y, w=None):
    """Train a multi-layer perceptron on the given data and report
    its score on the training set"""
# Initialize
start = time.time()
# Train the model using training data
mlp.fit(x, y, w)
train_score = mlp.score(x, y)
end = time.time()
print("Training Score: {:.4f} for Model {:s} ({:.1f} seconds)".format(
train_score, mlp_str, end-start))
    # This is an N_iter x 4 array...see store_stats
errors = np.asarray(errors_stored)
# Return the fitted models and the scores
return mlp, errors
# Train neural network
r_mlp, r_errors = train_nn(r_mlp, r_str, x, y)
# Save neural network
pickle.dump([r_mlp, r_str, r_errors, x_ppi, y_ppi, x_pp, y_pp, lat, lev, dlev],
open('data/regressors/' + r_str + '.pkl', 'wb'))
"""
Explanation: Train the Neural Network
End of explanation
"""
# Define a stored neural net to load
r_str = 'X-StandardScaler-qTindi_Y-SimpleY-qTindi_r_60R_60R_mom0.9reg1e-05'
# Load the NN
r_mlp_eval, _, errors, x_ppi, y_ppi, x_pp, y_pp, lat, lev, dlev = \
pickle.load(open('./data/regressors/' + r_str + '.pkl', 'rb'))
# Open the ***VALIDATION*** data set
datasource='./data/convection_50day_validation.pkl'
x_unscl, ytrue_unscl, _,_,_,_,_,_ = nnload.loaddata(datasource, minlev=min(lev))
# Scale data using input scalers
x_scl = nnload.transform_data(x_ppi, x_pp, x_unscl)
ytrue_scl = nnload.transform_data(y_ppi, y_pp, ytrue_unscl)
# Apply neural network to get predicted output
ypred_scl = r_mlp_eval.predict(x_scl)
ypred_unscl = nnload.inverse_transform_data(y_ppi, y_pp, ypred_scl)
"""
Explanation: Evaluate the NN
Load a more fully trained neural network as well as new (different) data for testing purposes
Also load validation data
End of explanation
"""
# plot_model_error_over_time(errors, r_str, figpath)
x = np.arange(errors.shape[0])
ytix = [.1e-3, .4e-3, .5e-3, 1e-3, 2e-3, 4e-3, 5e-3,
10e-3, 20e-3, 40e-3, 50e-3, 500e-3, 4]
# Plot error rate vs. iteration number
fig=plt.figure()
# Plot training errors
plt.semilogy(x, np.squeeze(errors[:,0]), alpha=0.5,color='blue',label='Training')
plt.semilogy(x, np.squeeze(errors[:,1]), alpha=0.5,color='blue')
plt.yticks(ytix,ytix)
plt.ylim((np.nanmin(errors), np.nanmax(errors)))
# Plot testing errors
plt.semilogy(x, np.squeeze(errors[:,2]), alpha=0.5,label='Testing',color='green')
plt.semilogy(x, np.squeeze(errors[:,3]), alpha=0.5,color='green')
plt.legend()
plt.title('Error for ' + r_str)
plt.xlabel('Iteration Number')
"""
Explanation: Plot error history as a function of iteration number
End of explanation
"""
def do_mean_or_std(method, vari, true, pred, lev, ind):
out_str_dict = {'T':'K/day','q':'g/kg/day'}
methods = {'mean':np.mean,'std':np.std}
plt.subplot(2,2,ind)
m = lambda x: methods[method](unpack(x,vari), axis=0).T
plt.plot(m(true), lev, label='true')
plt.plot(m(pred), lev, label='pred')
plt.ylim(np.amax(lev),np.amin(lev))
plt.ylabel('$\sigma$')
plt.xlabel(out_str_dict[vari])
plt.title(vari + " " + method)
plt.legend()
def plot_means_stds(y3_true, y3_pred, lev):
fig = plt.figure()
do_mean_or_std('mean','T',y3_true,y3_pred, lev, 1)
do_mean_or_std('mean','q',y3_true,y3_pred, lev, 2)
do_mean_or_std('std','T',y3_true,y3_pred, lev, 3)
do_mean_or_std('std','q',y3_true,y3_pred, lev, 4)
def plot_pearsonr(y_true, y_pred, vari, lev, label=None):
r = np.empty(y_true.shape[1])
prob = np.empty(y_true.shape[1])
for i in range(y_true.shape[1]):
r[i], prob[i] = scipy.stats.pearsonr(y_true[:,i],y_pred[:,i])
plt.plot(unpack(r,vari, axis=0), lev, label=label)
plt.ylim([np.amax(lev), np.amin(lev)])
plt.ylabel('$\sigma$')
plt.title('Correlation Coefficient')
# Plot means and standard deviations
plot_means_stds(ytrue_unscl, ypred_unscl, lev)
# Plot correlation coefficient versus height
fig = plt.figure()
plot_pearsonr(ytrue_unscl, ypred_unscl, 'T', lev, label=r'$\Delta$Temperature')
plot_pearsonr(ytrue_unscl, ypred_unscl, 'q', lev, label=r'$\Delta$Humidity')
plt.legend(loc="upper left")
"""
Explanation: Plot Mean Statistics
End of explanation
"""
# Plot histogram showing how well true and predicted values match
def check_output_distribution(yt_unscl, yt_scl, yp_unscl, yp_scl, lat, lev):
# For unscaled variables
fig, ax = plt.subplots(2, 2)
    x1, x2, bins = _plot_distribution(unpack(yp_unscl,'T'), lat, lev, fig, ax[0,1],
                                      r'Pred. $\Delta$Temp [K/day]')
    _, _, _ = _plot_distribution(unpack(yt_unscl,'T'), lat, lev, fig, ax[0,0],
                                 r'True $\Delta$Temp [K/day]', x1, x2, bins)
    x1, x2, bins = _plot_distribution(unpack(yp_unscl,'q'), lat, lev, fig, ax[1,1],
                                      r'Pred. $\Delta$Humidity [g/kg/day]')
    _, _, _ = _plot_distribution(unpack(yt_unscl,'q'), lat, lev, fig, ax[1,0],
                                 r'True $\Delta$Humidity [g/kg/day]', x1, x2, bins)
check_output_distribution(ytrue_unscl, ytrue_scl, ypred_unscl, ypred_scl,
lat, lev)
"""
Explanation: Check the distribution of output values
End of explanation
"""
def calc_precip(y, dlev):
y = unpack(y,'q')
y = y / 1000. # kg/kg/day
return vertical_integral(y, dlev) #mm/day
def vertical_integral(data, dlev):
g = 9.8 #m/s2
data = -1./g * np.sum(data * dlev[:,None].T, axis=1)*1e5
return data
# Plot a scatter plot of true vs predicted precip
P_true = calc_precip(ytrue_unscl, dlev)
P_pred = calc_precip(ypred_unscl, dlev)
# Plot data
plt.scatter(P_true, P_pred, s=5, alpha=0.25)
# Calculate mins and maxes and set axis bounds appropriately
xmin = np.min(P_true)
xmax = np.max(P_true)
ymin = np.min(P_pred)
ymax = np.max(P_pred)
xymin = np.min([xmin,ymin])
xymax = np.max([xmax,ymax])
# Plot 1-1 line
plt.plot([xymin,xymax], [xymin, xymax], color='k', ls='--')
plt.xlim(xymin, xymax)
plt.ylim(xymin, xymax)
plt.xlabel('True')
plt.ylabel('Predicted')
Plessthan0 = sum(P_pred < 0.0)
Plessthan0pct = 100.*Plessthan0/len(P_pred)
plt.text(0.01,0.95,"Pred. P<0 {:.1f}% of time".format(Plessthan0pct),
transform=plt.gca().transAxes)
"""
Explanation: Make a scatter plot of precipitation
End of explanation
"""
def plot_sample_profile(y_true, y_pred, lev):
    """Plots the vertical profiles of the true and predicted output
    tendencies for a single sample"""
    f, (ax1, ax3) = plt.subplots(1,2)
# Plot temperature tendencies
ax1.plot(unpack(y_true, 'T', axis=0), lev, color='red' ,
ls='-' , label=r'$\Delta$T true')
ax1.plot(unpack(y_pred, 'T', axis=0), lev, color='red' ,
ls='--', label=r'$\Delta$T pred')
ax1.set_xlabel(r'$\Delta$Temperature [K/day]')
# Plot humidity tendencies
ax3.plot(unpack(y_true, 'q', axis=0), lev, color='blue',
ls='-' , label=r'$\Delta$q true')
ax3.plot(unpack(y_pred, 'q', axis=0), lev, color='blue',
ls='--', label=r'$\Delta$q pred')
ax3.set_xlabel(r'$\Delta$Humidity [g/kg/day]')
# Set axis properties
for ax in [ax1, ax3]:
ax.set_ylim(1, 0.25)
ax.legend()
ax.grid(True)
    f.suptitle('Sample True and Predicted Outputs', fontsize=20)
samp = np.random.randint(0, ytrue_unscl.shape[0])
print('Random sample index value: ' + str(samp))
plot_sample_profile(ytrue_unscl[samp,:], ypred_unscl[samp,:], lev)
"""
Explanation: Some examples of the NN in action
End of explanation
"""
|
flohorovicic/pynoddy
|
docs/notebooks/3-Events.ipynb
|
gpl-2.0
|
from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
%matplotlib inline
"""
Explanation: Geological events in pynoddy: organisation and adaptation
Here we describe how the individual geological events of a Noddy history are organised within pynoddy. We then examine in more detail how aspects of these events can be adapted and how their effects can be evaluated.
End of explanation
"""
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
sys.path.append(repo_path)
import pynoddy
import pynoddy.history
import pynoddy.events
import pynoddy.output
# reload(pynoddy)
# Change to sandbox directory to store results
os.chdir(os.path.join(repo_path, 'sandbox'))
# Path to example directory in this repository
example_directory = os.path.join(repo_path,'examples')
# Compute noddy model for history file
history = 'simple_two_faults.his'
history_ori = os.path.join(example_directory, history)
output_name = 'noddy_out'
# reload(pynoddy.history)
# reload(pynoddy.events)
H1 = pynoddy.history.NoddyHistory(history_ori)
# Before we do anything else, let's actually define the cube size here to
# adjust the resolution for all subsequent examples
H1.change_cube_size(100)
# compute model - note: not strictly required, here just to ensure changed cube size
H1.write_history(history)
pynoddy.compute_model(history, output_name)
"""
Explanation: Loading events from a Noddy history
In the current set-up of pynoddy, we always start with a pre-defined Noddy history loaded from a file, and then change aspects of the history and its individual events. The first step is therefore to load the history file and to extract the individual geological events. This is done automatically by default when loading the history file into the History object:
End of explanation
"""
H1.events
"""
Explanation: Events are stored in the object dictionary "events" (who would have thought), where the key corresponds to the position in the timeline:
End of explanation
"""
H1.events[3].properties
"""
Explanation: We can see here that three events are defined in the history. Events are organised as objects themselves, containing all the relevant properties and information about the events. For example, the second fault event is defined as:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# get the original dip of the fault
dip_ori = H1.events[3].properties['Dip']
# add 10 degrees to dip
add_dip = -20
dip_new = dip_ori + add_dip
# and assign back to properties dictionary:
H1.events[3].properties['Dip'] = dip_new
# H1.events[2].properties['Dip'] = dip_new1
new_history = "dip_changed"
new_output = "dip_changed_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
# load output from both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', position=0, ax = ax1, colorbar=False, title="Dip = %.0f" % dip_ori, savefig=True, fig_filename ="tmp.eps")
NO2.plot_section('y', position=1, ax = ax2, colorbar=False, title="Dip = %.0f" % dip_new)
plt.show()
"""
Explanation: Changing aspects of geological events
So what we now want to do, of course, is to change aspects of these events and to evaluate the effect on the resulting geological model. Parameters can directly be updated in the properties dictionary:
End of explanation
"""
H1 = pynoddy.history.NoddyHistory(history_ori)
# The names of the two fault events defined in the history file are:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: Changing the order of geological events
The geological history is parameterised as single events in a timeline. Changing the order of events can be performed with two basic methods:
Swapping two events with a simple command
Adjusting the entire timeline with a complete remapping of events
The first method is probably the most useful to test how a simple change in the order of events will affect the final geological model. We will use it here with our example to test how the model would change if the timing of the faults were swapped.
The method to swap two geological events is defined on the level of the history object:
End of explanation
"""
# Now: swap the events:
H1.swap_events(2,3)
# And let's check if this is correctly reflected in the events order now:
print H1.events[2].name
print H1.events[3].name
"""
Explanation: We now swap the position of two events in the kinematic history. For this purpose, a high-level function can directly be used:
End of explanation
"""
new_history = "faults_changed_order.his"
new_output = "faults_out"
H1.write_history(new_history)
pynoddy.compute_model(new_history, new_output)
reload(pynoddy.output)
# Load and compare both models
NO1 = pynoddy.output.NoddyOutput(output_name)
NO2 = pynoddy.output.NoddyOutput(new_output)
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
NO1.plot_section('y', ax = ax1, colorbar=False, title="Model 1")
NO2.plot_section('y', ax = ax2, colorbar=False, title="Model 2")
plt.show()
"""
Explanation: Now let's create a new history file and evaluate the effect of the changed order in a cross section view:
End of explanation
"""
diff = (NO2.block - NO1.block)
"""
Explanation: Determining the stratigraphic difference between two models
Here is another quick example of a possible application of pynoddy for evaluating aspects that are not easily accessible with, for example, the GUI version of Noddy itself. In the last example with the changed order of the faults, we might be interested in determining where in space this change had an effect. We can test this quite simply using the NoddyOutput objects.
The geology data is stored in the NoddyOutput.block attribute. To evaluate the difference between two models, we can therefore simply compute:
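On top of that difference array, we can also quantify how much of the model volume is affected by the swapped fault order. This is just a small numpy sketch (not a built-in pynoddy feature):
import numpy as np
changed_fraction = np.count_nonzero(diff) / float(diff.size)
print("Fraction of voxels with a different geology id: %.1f%%" % (100 * changed_fraction))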
End of explanation
"""
fig = plt.figure(figsize = (5,3))
ax = fig.add_subplot(111)
ax.imshow(diff[:,10,:].transpose(), interpolation='nearest',
cmap = "RdBu", origin = 'lower left')
"""
Explanation: And create a simple visualisation of the difference in a slice plot with:
End of explanation
"""
NO1.export_to_vtk(vtk_filename = "model_diff", data = diff)
"""
Explanation: (Adding a meaningful title and axis labels to the plot is left to the reader as a simple exercise :-) Future versions of pynoddy might provide an automatic implementation for this step...)
Again, we may want to visualise results in 3-D. We can use the export_to_vtk function as before, but now assign the data array to be exported as the calculated difference field:
End of explanation
"""
|
vadim-ivlev/STUDY
|
handson-data-science-python/DataScience-Python3/CovarianceCorrelation.ipynb
|
mit
|
%matplotlib inline
import numpy as np
from pylab import *
def de_mean(x):
xmean = mean(x)
return [xi - xmean for xi in x]
def covariance(x, y):
n = len(x)
return dot(de_mean(x), de_mean(y)) / (n-1)
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = np.random.normal(50.0, 10.0, 1000)
scatter(pageSpeeds, purchaseAmount)
covariance (pageSpeeds, purchaseAmount)
"""
Explanation: Covariance and Correlation
Covariance measures how two variables vary in tandem from their means.
For example, let's say we work for an e-commerce company, and they are interested in finding a correlation between page speed (how fast each web page renders for a customer) and how much a customer spends.
numpy offers covariance methods, but we'll do it the "hard way" to show what happens under the hood. Basically we treat each variable as a vector of deviations from its mean, and compute the "dot product" of both vectors. Geometrically, this is related to the angle between the two vectors in a high-dimensional space, but you can just think of it as a measure of similarity between the two variables.
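As a quick sanity check (with fresh toy data), the hand-rolled covariance matches numpy's built-in estimator; np.cov returns the full covariance matrix, so the off-diagonal entry is the covariance between the two variables:
a = np.random.normal(3.0, 1.0, 1000)
b = np.random.normal(50.0, 10.0, 1000)
print(covariance(a, b))
print(np.cov(a, b)[0, 1])  # should agree; both use the n-1 denominator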
First, let's just make page speed and purchase amount totally random and independent of each other; a very small covariance will result as there is no real correlation:
End of explanation
"""
purchaseAmount = np.random.normal(50.0, 10.0, 1000) / pageSpeeds
scatter(pageSpeeds, purchaseAmount)
covariance (pageSpeeds, purchaseAmount)
"""
Explanation: Now we'll make our fabricated purchase amounts an actual function of page speed, making a very real correlation. The negative value indicates an inverse relationship; pages that render in less time result in more money spent:
End of explanation
"""
def correlation(x, y):
stddevx = x.std()
stddevy = y.std()
return covariance(x,y) / stddevx / stddevy #In real life you'd check for divide by zero here
correlation(pageSpeeds, purchaseAmount)
"""
Explanation: But, what does this value mean? Covariance is sensitive to the units used in the variables, which makes it difficult to interpret. Correlation normalizes everything by their standard deviations, giving you an easier to understand value that ranges from -1 (for a perfect inverse correlation) to 1 (for a perfect positive correlation):
End of explanation
"""
np.corrcoef(pageSpeeds, purchaseAmount)
"""
Explanation: numpy can do all this for you with numpy.corrcoef. It returns a matrix of the correlation coefficients between every combination of the arrays passed in:
End of explanation
"""
purchaseAmount = 100 - pageSpeeds * 3
scatter(pageSpeeds, purchaseAmount)
correlation (pageSpeeds, purchaseAmount)
"""
Explanation: (It doesn't match exactly just due to the math precision available on a computer.)
We can force a perfect correlation by fabricating a totally linear relationship (again, it's not exactly -1 just due to precision errors, but it's close enough to tell us there's a really good correlation here):
End of explanation
"""
|
maubarsom/ORFan-proteins
|
phage_assembly/5_annotation/asm_v1.2/orf_160621/3b_select_reliable_orfs.ipynb
|
mit
|
#Load blast hits
blastp_hits = pd.read_csv("2_blastp_hits.tsv",sep="\t",quotechar='"')
blastp_hits.head()
#Filter out Metahit 2010 hits, keep only Metahit 2014
blastp_hits = blastp_hits[blastp_hits.db != "metahit_pep"]
"""
Explanation: 1. Load blast hits
End of explanation
"""
#Assumes the Fasta file comes with the header format of EMBOSS getorf
fh = open("1_orf/d9539_asm_v1.2_orf.fa")
header_regex = re.compile(r">([^ ]+?) \[([0-9]+) - ([0-9]+)\]")
orf_stats = []
for line in fh:
header_match = header_regex.match(line)
if header_match:
is_reverse = line.rstrip(" \n").endswith("(REVERSE SENSE)")
q_id = header_match.group(1)
#Position in contig
q_cds_start = int(header_match.group(2) if not is_reverse else header_match.group(3))
q_cds_end = int(header_match.group(3) if not is_reverse else header_match.group(2))
#Length of orf in aminoacids
q_len = (q_cds_end - q_cds_start + 1) / 3
orf_stats.append( pd.Series(data=[q_id,q_len,q_cds_start,q_cds_end,("-" if is_reverse else "+")],
index=["q_id","orf_len","q_cds_start","q_cds_end","strand"]))
orf_stats_df = pd.DataFrame(orf_stats)
print(orf_stats_df.shape)
orf_stats_df.head()
#Write orf stats to csv
orf_stats_df.to_csv("1_orf/orf_stats.csv",index=False)
"""
Explanation: 2. Process blastp results
2.1 Extract ORF stats from fasta file
End of explanation
"""
blastp_hits_annot = blastp_hits.merge(orf_stats_df,left_on="query_id",right_on="q_id")
#Add query coverage calculation
blastp_hits_annot["q_cov_calc"] = (blastp_hits_annot["q_end"] - blastp_hits_annot["q_start"] + 1 ) * 100 / blastp_hits_annot["q_len"]
blastp_hits_annot.sort_values(by="bitscore",ascending=False).head()
assert blastp_hits_annot.shape[0] == blastp_hits.shape[0]
"""
Explanation: 2.2 Annotate blast hits with orf stats
End of explanation
"""
! mkdir -p 4_msa_prots
#Get best hit (highest bitscore) for each ORF
gb = blastp_hits_annot[ (blastp_hits_annot.q_cov > 80) & (blastp_hits_annot.pct_id > 40) & (blastp_hits_annot.e_value < 1) ].groupby("query_id")
reliable_orfs = pd.DataFrame( hits.ix[hits.bitscore.idxmax()] for q_id,hits in gb )[["query_id","db","subject_id","pct_id","q_cov","q_len",
"bitscore","e_value","strand","q_cds_start","q_cds_end"]]
reliable_orfs = reliable_orfs.sort_values(by="q_cds_start",ascending=True)
reliable_orfs
"""
Explanation: 2.3 Extract best hit for each ORF (q_cov > 80%, pct_id > 40% and e-value < 1)
Define these resulting 7 ORFs as the core ORFs for the d9539 assembly.
The homology to the Metahit gene catalogue is very good, and considering the catalogue was curated
on a big set of gut metagenomes, it is reasonable to assume that these putative proteins would come
from our detected circular putative virus/phage genome.
Two extra notes:
* Additionally, considering only these 7 ORFs, almost the entire genomic region is covered, with very few non-coding regions, still consistent with the hypothesis of a small viral genome which should be mainly coding (a rough check of this is sketched below)
* Also, even though the naive ORF finder detected putative ORFs in both positive and negative strands, the supported ORFs only occur in the positive strand. This could be an indication of a ssDNA or ssRNA virus.
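As a rough check of the "mostly coding" observation, we can compare the total length of the selected CDS intervals with the region they span (overlaps between intervals are ignored, so this is only an approximation):
span = reliable_orfs["q_cds_end"].max() - reliable_orfs["q_cds_start"].min() + 1
coding_bp = (reliable_orfs["q_cds_end"] - reliable_orfs["q_cds_start"] + 1).sum()
print("Coding bases / spanned bases: {:.0%}".format(float(coding_bp) / span))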
End of explanation
"""
reliable_orfs["orf_id"] = ["orf{}".format(x) for x in range(1,reliable_orfs.shape[0]+1) ]
reliable_orfs["cds_len"] = reliable_orfs["q_cds_end"] - reliable_orfs["q_cds_start"] +1
reliable_orfs.sort_values(by="q_cds_start",ascending=True).to_csv("3_filtered_orfs/filt_orf_stats.csv",index=False,header=True)
reliable_orfs.sort_values(by="q_cds_start",ascending=True).to_csv("3_filtered_orfs/filt_orf_list.txt",index=False,header=False,columns=["query_id"])
"""
Explanation: 2.4 Extract selected orfs for further analysis
End of explanation
"""
! ~/utils/bin/seqtk subseq 1_orf/d9539_asm_v1.2_orf.fa 3_filtered_orfs/filt_orf_list.txt > 3_filtered_orfs/d9539_asm_v1.2_orf_filt.fa
"""
Explanation: 2.4.2 Extract fasta
End of explanation
"""
filt_blastp_hits = blastp_hits_annot[ blastp_hits_annot.query_id.apply(lambda x: x in reliable_orfs.query_id.tolist())]
filt_blastp_hits.to_csv("3_filtered_orfs/d9539_asm_v1.2_orf_filt_blastp.tsv",sep="\t",quotechar='"')
filt_blastp_hits.head()
"""
Explanation: 2.4.3 Write out filtered blast hits
End of explanation
"""
|
tomfaulkenberry/MT_flanker
|
exp2/results/.ipynb_checkpoints/SqueakIntro-checkpoint.ipynb
|
gpl-2.0
|
# For reading data files
import os
import glob
import numpy as np # Numeric calculation
import pandas as pd # General purpose data analysis library
import squeak # For mouse data
# For plotting
import matplotlib.pyplot as plt
%matplotlib inline
# Prettier default settings for plots (optional)
import seaborn
seaborn.set_style('darkgrid')
from pylab import rcParams
rcParams['figure.figsize'] = 8, 5
"""
Explanation: Update: I've moved the code from this post, along with resources for designing mouse tracking experiments, and some example data, to a GitHub repository. The best way to learn how to use squeak is to play around with this repository, which also includes the content of this post.
A while ago, I gathered up the python code I've been using to process mouse trajectory data
into a package and gave it the jaunty title squeak.
However, as this was mostly for my own use, I never got around to properly documenting it.
Recently, a few people have asked me for advice on analysing mouse data not collected using MouseTracker - for instance, data generated using my OpenSesame implementation. In response, I've gone through a full example for this post, and written a script that should be able to preprocess any data collected using my OpenSesame implementation. To use any of this, you'll need to have the python language installed, along with some specific scientific packages, and of course squeak itself, which is available using the pip command:
pip install squeak
In this post, I go through the code bit by bit, explaining what specifically is going on.
If you're not used to using python, you don't have to worry too much about understanding all of the syntax,
although python is relatively easy to read as if it were plain English. The full, downloadable script is included at the bottom of the page.
Data Processing
End of explanation
"""
results = []
for datafile in glob.glob('data/*.csv'):
this_data = pd.read_csv(datafile)
results.append(this_data)
data = pd.concat(results)
"""
Explanation: First, we need to load our data.
I'll show how to do this using the .csv files saved by OpenSesame,
as this gives us a chance to see how you can use squeak to handle
trajectory data that's been saved in this exchangable format.
We can combine all of our files into a single data structure
by reading them one at a time, using pd.read_csv,
storing them in a list,
and then merging this list using pd.concat.
End of explanation
"""
data = pd.concat(
[pd.DataFrame(pd.read_csv(datafile))
for datafile in glob.glob('data/*.csv')])
"""
Explanation: A faster and more concise alternative, using python's list comprehension abilities, would look like this instead:
End of explanation
"""
print data.head()
"""
Explanation: Either way, we end up with data in the form shown below.
End of explanation
"""
data['t'] = data.tTrajectory.map(squeak.list_from_string)
data['x'] = data.xTrajectory.map(squeak.list_from_string)
data['y'] = data.yTrajectory.map(squeak.list_from_string)
"""
Explanation: As you can see, there's one row per trial,
and each of the coding variables we recorded in OpenSesame occupy a single column.
The trajectory data, though, is stored in three columns,
"tTrajectory",
"xTrajectory",
and "yTrajectory",
corresponding to time elapsed, x-axis position, and y-axis position, respectively.
Each cell here actually contains a string representation of the list of values in each case, in the form
"[time1, time2, time3, ..., timeN]"
We can parse these using squeak's list_from_string function.
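Just to demystify the parsing step: a string like "[120, 135, 151]" can be turned back into a Python list with the standard library alone; squeak's list_from_string does the equivalent job for us here.
import ast
print(ast.literal_eval("[120, 135, 151]"))  # -> [120, 135, 151]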
End of explanation
"""
for i in range(len(data)):
x = data.x.iloc[i]
y = data.y.iloc[i]
plt.plot(x, y, color='blue', alpha=.5) # alpha controlls the transparency
plt.show()
"""
Explanation: At this stage, we have our data in a format python can understand, and it looks like this.
End of explanation
"""
data['y'] = data.y * -1 # Reverse y axis
data['x'] = data.x.map(squeak.remap_right) # Flip the leftward responses
data['x'] = data.x.map(squeak.normalize_space)
data['y'] = data.y.map(squeak.normalize_space) * 1.5
for i in range(len(data)):
x = data.x.iloc[i]
y = data.y.iloc[i]
plt.plot(x, y, color='blue', alpha=.5)
plt.text(0, 0, 'START', horizontalalignment='center')
plt.text(1, 1.5, 'END', horizontalalignment='center')
plt.show()
"""
Explanation: We still need to do some preprocessing of the trajectories. OpenSesame logs y-axis coordinates upside down from what we would want, and, more importantly, it's conventional to standardise trajectories so they start at [0,0] and end at [1,1.5], and to mirror the trials where the left-hand response was chosen so that all trajectories can be compared on the same side. Let's do that now.
End of explanation
"""
for i in range(len(data)):
x = data.x.iloc[i]
t = data.t.iloc[i]
plt.plot(t, x, color='blue', alpha=.3)
plt.xlabel('Time (msec)')
plt.ylabel('x axis position')
plt.show()
"""
Explanation: Our next problem is that all of our trials last for different amounts of time.
End of explanation
"""
data['nx'], data['ny'] = zip(*[squeak.even_time_steps(x, y, t) for x, y, t, in zip(data.x, data.y, data.t)])
for i, x in data.nx.iteritems():
plt.plot(x, color='blue', alpha=.3)
plt.xlabel('Normalized time step')
plt.ylabel('x axis position')
plt.show()
"""
Explanation: We can deal with this in one of two ways, both of which I'll demonstrate.
Most analyses standardize the trajectories into 101 time slices, for comparison,
meaning that for every trajectory, sample 50 is halfway through, regardless of how long that actually takes.
(the code looks a little intimidating, and future versions of squeak should include a more concise way of doing this. You don't need to worry too much about what's happening here).
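The core of the 101-step normalisation is just interpolation onto a fixed grid. A bare-bones numpy version of the idea (with made-up numbers, not squeak's actual even_time_steps implementation) looks like this:
t_demo = np.array([0., 150., 400., 900.])         # raw timestamps (ms)
x_demo = np.array([0.0, 0.1, 0.6, 1.0])           # raw x positions
t_norm = np.linspace(t_demo[0], t_demo[-1], 101)  # 101 evenly spaced time steps
x_norm = np.interp(t_norm, t_demo, x_demo)        # resampled x positions
print(x_norm[:5])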
End of explanation
"""
max_time = 5000  # Alternatively, max_time = data.rt.max()
data['rx'] = [squeak.uniform_time(x, t, max_duration=max_time) for x, t in zip(data.x, data.t)]
data['ry'] = [squeak.uniform_time(y, t, max_duration=max_time) for y, t in zip(data.y, data.t)]
for i in range(len(data)):
x = data.rx.iloc[i]
plt.plot(x.index, x, color='blue', alpha=.3)
plt.xlabel('Time (msec)')
plt.ylabel('x axis position')
plt.show()
"""
Explanation: An alternative approach is to keep the actual timestamp for each sample, so you can analyse the development of the trajectories in real time. To do this, you need to "extend" the data for all of the trials so that they all last for the same amount of time. In this example, we'll extend every trial to 5 seconds (5000 milliseconds).
This can be done by treating all of the time after the participant has clicked on their response as if they instead just kept the cursor right on top of the response until they reach 5 seconds. Again, you can copy this code literally, so don't worry about the details of the syntax here.
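Here is a tiny illustration of the padding idea on a toy series (this is not squeak's uniform_time internals, just the general approach): resample onto a fixed 20 ms grid and hold the final position up to the 5000 ms mark.
toy = pd.Series([0.0, 0.4, 1.0], index=[0, 300, 900])  # x position indexed by time (ms)
grid = list(range(0, 5001, 20))
padded = toy.reindex(toy.index.union(grid)).interpolate(method='index').ffill()
padded = padded.loc[grid]
print(padded.tail())  # the final position (1.0) is held until 5000 ms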
End of explanation
"""
# Mouse Stats
data['md'] = data.apply(lambda trial: squeak.max_deviation(trial['nx'], trial['ny']), axis=1)
data['auc'] = data.apply(lambda trial: squeak.auc(trial['nx'], trial['ny']), axis=1)
data['xflips'] = data.nx.map(squeak.count_x_flips)
data['init_time'] = data.ry.map(lambda y: y.index[np.where(y > .05)][0])
# Taking a look at condition means
print data.groupby('condition')['md', 'auc', 'xflips', 'init_time', 'rt'].mean()
"""
Explanation: With all of this done, you're ready to calculate the statistics you'll be using in your analyses. Again, don't worry too much about the syntax here.
The most popular measures, calculated here, are:
Maximum Deviation (MD): The largest distance between the actual trajectory and the straight line it would have followed if it went directly from start to end (a rough sketch of this calculation is given below).
Area Under the Curve (AUC): The area bounded between the trajectory and the ideal straight line path
X-flips: changes of direction on the x axis
Initiation time: The time taken for the participant to start moving the cursor.
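For the curious, here is a rough numpy sketch of the idea behind maximum deviation (this is not squeak's max_deviation implementation): the largest perpendicular distance between the observed trajectory and the straight line joining its first and last points.
def max_deviation_sketch(x, y):
    p = np.column_stack([x, y])
    start, end = p[0], p[-1]
    u = (end - start) / np.linalg.norm(end - start)     # unit vector along the straight path
    rel = p - start
    perp = np.abs(rel[:, 0] * u[1] - rel[:, 1] * u[0])  # perpendicular distance of each sample
    return perp.max()
t = np.linspace(0, 1, 101)
print(max_deviation_sketch(t + 0.2 * np.sin(np.pi * t), 1.5 * t))  # toy curved trajectory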
End of explanation
"""
nx = pd.concat(list(data.nx), axis=1).T
ny = pd.concat(list(data.ny), axis=1).T
rx = pd.concat(list(data.rx), axis=1).T
ry = pd.concat(list(data.ry), axis=1).T
"""
Explanation: Finally, we'll save our processed data. First, we split off our processed mouse trajectory columns into separate data structures, which I'll explore a little more below.
The normalized time data are labelled nx and ny, and are formatted so that each row corresponds to a single trial, and each column is one of the 101 normalized time points (0 to 100). The real time data, rx and ry, are structured analogously, with each column corresponding to a timestamp. By default, these are broken up into 20 msec intervals, and the column headings (20, 40, 60, etc) reflect the actual timestamp.
End of explanation
"""
redundant = ['xTrajectory', 'yTrajectory', 'tTrajectory',
'x', 'y', 't', 'nx', 'ny', 'rx', 'ry']
data = data.drop(redundant, axis=1)
data.head()
# Save data
data.to_csv('processed.csv', index=False)
nx.to_csv('nx.csv', index=False)
ny.to_csv('ny.csv', index=False)
rx.to_csv('rx.csv', index=False)
ry.to_csv('ry.csv', index=False)
"""
Explanation: With that done, we can delete this information from our main data frame, so that it's compact enough to use easily in your data analysis package of choice, before finally saving everything as csv files.
End of explanation
"""
|
msadegh97/machine-learning-course
|
appendix-02-Numpy_Pandas.ipynb
|
gpl-3.0
|
import numpy as np
a = [1,2,3]
a
b = np.array(a)
b
np.arange(1, 10)
np.arange(1, 10, 2)
"""
Explanation: NumPy
NumPy is a Linear Algebra Library for Python.
NumPy’s main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type, indexed by a tuple of positive integers. In NumPy dimensions are called axes. The number of axes is rank.
For example, the coordinates of a point in 3D space, [1, 2, 1], form an array of rank 1, because it has one axis. That axis has a length of 3. A two-dimensional array (a table of rows and columns, such as [[1, 0, 0], [0, 1, 2]]) has rank 2.
Numpy is also incredibly fast, as it has bindings to C libraries.
For easy installing Numpy:
bash
sudo pip3 install numpy
NumPy array
End of explanation
"""
np.zeros(2, dtype=float)
np.zeros((2,3))
"""
Explanation: zeros , ones and eye
np.zeros
Return a new array of given shape and type, filled with zeros.
End of explanation
"""
np.ones(3, )
"""
Explanation: ones
Return a new array of given shape and type, filled with ones.
End of explanation
"""
np.eye(3)
"""
Explanation: eye
Return a 2-D array with ones on the diagonal and zeros elsewhere.
End of explanation
"""
np.linspace(1, 11, 3)
"""
Explanation: linspace
Returns num evenly spaced samples, calculated over the interval [start, stop].
End of explanation
"""
np.random.rand(2)
np.random.rand(2,3,4)
"""
Explanation: Random number and matrix
rand
Random values in a given shape.
End of explanation
"""
np.random.randn(2,3)
"""
Explanation: randn
Return a sample (or samples) from the "standard normal" distribution.
np.random.standard_normal is similar, but takes a tuple as its argument.
End of explanation
"""
np.random.random()
"""
Explanation: random
Return random floats in the half-open interval [0.0, 1.0).
End of explanation
"""
np.random.randint(1,50,10)
np.random.randint(1,40)
"""
Explanation: randint
Return n random integers (by default one integer) from low (inclusive) to high (exclusive).
End of explanation
"""
zero = np.zeros([3,4])
print(zero , ' ' ,'shape of a :' , zero.shape)
zero = zero.reshape([2,6])
print()
print(zero)
"""
Explanation: Shape and Reshape
shape returns the shape of the data, and reshape returns an array containing the same data with a new shape.
End of explanation
"""
number = np.array([[1,2,],
[3,4]])
number2 = np.array([[1,3],[2,1]])
print('element wise product :\n',number * number2 )
print('matrix product :\n',number.dot(number2)) ## also can use : np.dot(number, number2)
"""
Explanation: Basic Operation
Element wise product and matrix product
End of explanation
"""
numbers = np.random.randint(1,100, 10)
print(numbers)
print('max is :', numbers.max())
print('index of max :', numbers.argmax())
print('min is :', numbers.min())
print('index of min :', numbers.argmin())
print('mean :', numbers.mean())
"""
Explanation: min max argmin argmax mean
End of explanation
"""
number = np.arange(1,10).reshape(3,3)
print(number)
print()
print('exp:\n', np.exp(number))
print()
print('sqrt:\n',np.sqrt(number))
"""
Explanation: Universal function
numpy also has functions for mathematical operations like exp, log, sqrt, abs, etc.
To find more functions, click here.
End of explanation
"""
numbers.dtype
"""
Explanation: dtype
End of explanation
"""
number = np.arange(0,20)
number2 = number
print (number is number2 , id(number), id(number2))
print(number)
number2.shape = (4,5)
print(number)
"""
Explanation: No copy & Shallow copy & Deep copy
No copy
Simple assignments make no copy of array objects or of their data.
End of explanation
"""
number = np.arange(0,20)
number2 = number.view()
print (number is number2 , id(number), id(number2))
number2.shape = (5,4)
print('number2 shape:', number2.shape,'\nnumber shape:', number.shape)
print('befor:', number)
number2[0][0] = 2222
print()
print('after:', number)
"""
Explanation: Shallow copy
Different array objects can share the same data. The view method creates a new array object that looks at the same data.
End of explanation
"""
number = np.arange(0,20)
number2 = number.copy()
print (number is number2 , id(number), id(number2))
print('befor:', number)
number2[0] = 10
print()
print('after:', number)
print()
print('number2:',number2)
"""
Explanation: Deep copy
The copy method makes a complete copy of the array and its data.
End of explanation
"""
number = np.arange(1,11)
num = 2
print(' number =', number)
print('\n number .* num =',number * num)
number = np.arange(1,10).reshape(3,3)
number2 = np.arange(1,4).reshape(1,3)
number * number2
number = np.array([1,2,3])
print('number =', number)
print('\nnumber =', number + 100)
number = np.arange(1,10).reshape(3,3)
number2 = np.arange(1,4)
print('number: \n', number)
add = number + number2
print()
print('number2: \n ', number2)
print()
print('add: \n', add)
"""
Explanation: Broadcasting
One of the important concepts to understand in NumPy is broadcasting.
It's very useful for performing mathematical operations between arrays of different shapes.
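A small extra illustration of the broadcasting rule: shapes are aligned from the right, and a dimension of size 1 is stretched to match the other array.
col = np.arange(3).reshape(3, 1)   # shape (3, 1)
row = np.arange(4).reshape(1, 4)   # shape (1, 4)
print((col + row).shape)           # broadcasts to (3, 4)
print(col + row)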
End of explanation
"""
from time import time
a = np.random.rand(8000000, 1)
c = 0
tic = time()
for i in range(len(a)):
c +=(a[i][0] * a[i][0])
print ('output1:', c)
tak = time()
print('multiply 2 matrix with loop: ', tak - tic)
tic = time()
print('output2:', np.dot(a.T, a))
tak = time()
print('multiply 2 matrix with numpy func: ', tak - tic)
"""
Explanation: If you still doubt why we use Python and NumPy, see the timing comparison. 😉
End of explanation
"""
import pandas as pd
"""
Explanation: I tried to cover the essential parts of NumPy so you can start coding and enjoy it, but there are many functions not covered in this notebook; if you need more information, click here.
Pandas
pandas is an open source library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
To install pandas easily:
bash
sudo pip3 install pandas
End of explanation
"""
labels = ['a','b','c']
my_list = [10,20,30]
arr = np.array([10,20,30])
d = {'a':10,'b':20,'c':30}
pd.Series(data=my_list)
pd.Series(data=my_list,index=labels)
pd.Series(d)
"""
Explanation: Series
End of explanation
"""
dataframe = pd.DataFrame(np.random.randn(5,4),columns=['A','B','C','D'])
dataframe.head()
"""
Explanation: Dataframe
Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like container for Series objects. The primary pandas data structure
End of explanation
"""
dataframe['A']
dataframe[['A', 'D']]
"""
Explanation: Selection
End of explanation
"""
dataframe['E'] = dataframe['A'] + dataframe['B']
dataframe
"""
Explanation: creating a new column
End of explanation
"""
dataframe.drop('E', axis=1)
dataframe
dataframe.drop('E', axis=1, inplace=True)
dataframe
"""
Explanation: removing a column
End of explanation
"""
dataframe.loc[0]
dataframe.iloc[0]
dataframe.loc[0 , 'A']
dataframe.loc[[0,2],['A', 'C']]
"""
Explanation: Selecting rows
End of explanation
"""
dataframe > 0.3
dataframe[dataframe > 0.3 ]
dataframe[dataframe['A']>0.3]
dataframe[dataframe['A']>0.3]['B']
dataframe[(dataframe['A']>0.5) & (dataframe['C'] > 0)]
"""
Explanation: Conditional Selection
End of explanation
"""
layer1 = ['g1','g1','g1','g2','g2','g2']
layer2 = [1,2,3,1,2,3]
hier_index = list(zip(layer1,layer2))
hier_index = pd.MultiIndex.from_tuples(hier_index)
hier_index
dataframe2 = pd.DataFrame(np.random.randn(6,2),index=hier_index,columns=['A','B'])
dataframe2
dataframe2.loc['g1']
dataframe2.loc['g1'].loc[1]
"""
Explanation: Multi-Index and Index Hierarchy
End of explanation
"""
titanic = pd.read_csv('Datasets/titanic.csv')
titanic.head()
titanic.drop('Name', axis=1 , inplace = True)
titanic.head()
titanic.to_csv('Datasets/titanic_drop_names.csv')
"""
Explanation: Input and output
End of explanation
"""
titanic.head()
titanic.dropna()
titanic.dropna(axis=1)
titanic.fillna('Fill NaN').head()
"""
Explanation: csv is one of the most important format but Pandas compatible with many other format like html table , sql, json and etc.
Mising data (NaN)
End of explanation
"""
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
df1
df2
df3
"""
Explanation: Concatenating, merging and more
End of explanation
"""
frames = [df1, df2, df3 ]
pd.concat(frames)
#pd.concat(frames, ignore_index=True)
pd.concat(frames, axis=1)
df1.append(df2)
"""
Explanation: Concatenation
End of explanation
"""
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
left
right
pd.merge(left, right, on= 'key')
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
pd.merge(left, right, on=['key1', 'key2'])
pd.merge(left, right, how='outer', on=['key1', 'key2'])
pd.merge(left, right, how='left', on=['key1', 'key2'])
pd.merge(left, right, how='right', on=['key1', 'key2'])
"""
Explanation: Merging
End of explanation
"""
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
left
right
left.join(right)
"""
Explanation: Joining
End of explanation
"""
|
jhprinz/openpathsampling
|
examples/alanine_dipeptide_tps/AD_tps_1_trajectory.ipynb
|
lgpl-2.1
|
%matplotlib inline
import matplotlib.pyplot as plt
import openpathsampling as paths
import openpathsampling.engines.openmm as peng_omm
from simtk.openmm import app
import simtk.openmm as mm
import simtk.unit as unit
from openmmtools.integrators import VVVRIntegrator
import mdtraj as md
import numpy as np
"""
Explanation: This is the first file to run in the alanine dipeptide TPS example. This teaches you how to:
set up an engine using OpenMM
set up states using MDTraj-based collective variables
obtain an initial trajectory using high-temperature MD
equilibrate by using shooting moves until the first decorrelated trajectory
We assume at this point that you are familiar with the basic concepts of OPS. If you find this file confusing, we recommend working through the toy model examples.
Imports
End of explanation
"""
# this cell is all OpenMM specific
forcefield = app.ForceField('amber96.xml', 'tip3p.xml')
pdb = app.PDBFile("../resources/AD_initial_frame.pdb")
system = forcefield.createSystem(
pdb.topology,
nonbondedMethod=app.PME,
nonbondedCutoff=1.0*unit.nanometers,
constraints=app.HBonds,
rigidWater=True,
ewaldErrorTolerance=0.0005
)
hi_T_integrator = VVVRIntegrator(
500*unit.kelvin,
1.0/unit.picoseconds,
2.0*unit.femtoseconds)
hi_T_integrator.setConstraintTolerance(0.00001)
"""
Explanation: Setting up the engine
Now we set things up for the OpenMM simulation. We will need an openmm.System object and an openmm.Integrator object.
To learn more about OpenMM, read the OpenMM documentation. The code we use here is based on output from the convenient web-based OpenMM builder.
End of explanation
"""
template = peng_omm.snapshot_from_pdb("../resources/AD_initial_frame.pdb")
openmm_properties = {'OpenCLPrecision': 'mixed'}
engine_options = {
'n_frames_max': 2000,
'nsteps_per_frame': 10
}
hi_T_engine = peng_omm.Engine(
template.topology,
system,
hi_T_integrator,
openmm_properties=openmm_properties,
options=engine_options
)
hi_T_engine.name = '500K'
hi_T_engine.current_snapshot = template
hi_T_engine.minimize()
"""
Explanation: The storage file will need a template snapshot. In addition, the OPS OpenMM-based Engine has a few properties and options that are set by these dictionaries.
End of explanation
"""
# define the CVs
psi = paths.MDTrajFunctionCV("psi", md.compute_dihedrals, template.topology, indices=[[6,8,14,16]])
phi = paths.MDTrajFunctionCV("phi", md.compute_dihedrals, template.topology, indices=[[4,6,8,14]])
# define the states
deg = 180.0/np.pi
C_7eq = (paths.PeriodicCVDefinedVolume(phi, lambda_min=-180/deg, lambda_max=0/deg,
period_min=-np.pi, period_max=np.pi) &
paths.PeriodicCVDefinedVolume(psi, lambda_min=100/deg, lambda_max=200/deg,
period_min=-np.pi, period_max=np.pi)
).named("C_7eq")
# similarly, without bothering with the labels:
alpha_R = (paths.PeriodicCVDefinedVolume(phi, -180/deg, 0/deg, -np.pi, np.pi) &
paths.PeriodicCVDefinedVolume(psi, -100/deg, 0/deg, -np.pi, np.pi)).named("alpha_R")
"""
Explanation: Defining states
First we define the CVs using the md.compute_dihedrals function. Then we define our states using PeriodicCVDefinedVolume (since our CVs are periodic).
End of explanation
"""
init_traj_ensemble = paths.AllOutXEnsemble(C_7eq) | paths.AllOutXEnsemble(alpha_R)
# generate trajectory that includes frame in both states
trajectory = hi_T_engine.generate(hi_T_engine.current_snapshot, [init_traj_ensemble])
# create a network so we can use its ensemble to obtain an initial trajectory
# use all-to-all because we don't care if initial traj is A->B or B->A: it can be reversed
tmp_network = paths.TPSNetwork.from_states_all_to_all([C_7eq, alpha_R])
# take the subtrajectory matching the ensemble (only one ensemble, only one subtraj)
subtrajectories = []
for ens in tmp_network.analysis_ensembles:
subtrajectories += ens.split(trajectory)
print subtrajectories
"""
Explanation: Getting a first trajectory
The idea here is a little subtle, but it makes nice use of our generalized path ensemble idea.
We want a path which contains at least one frame in each state. The question is, what ensemble can we use to create such a trajectory?
The first obvious thought would be goal_ensemble = PartInXEnsemble(stateA) & PartInXEnsemble(stateB) (which can, of course, be further generalized to more states). However, while that is the ensemble we want to eventually satisfy, we can't use its can_append to create it, because its can_append always returns True: the trajectory will go on forever!
But we can use a trick: since what we want is the first trajectory that satisfies goal_ensemble, we know that every shorter trajectory will not satisfy it. This means that the shorter trajectories must satisfy the complement of goal_ensemble, and the trajectory we want will be the first trajectory that does not satisfy the complement!
So the trick we'll use is to build the trajectory by using the fact that the shorter trajectories are in the complement of goal_ensemble, which is given by complement = AllOutXEnsemble(stateA) | AllOutXEnsemble(stateB). The generate function will stop when that is no longer true, giving us the trajectory we want. This can be directly generalized to more states.
Note that here we're not even using the can_append function. That happens to be the same as the ensemble itself for this particular ensemble, but conceptually, we're actually using the test of whether a trajectory is in the ensemble at all.
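As a minimal sketch of that test (assuming the PartInXEnsemble class from OpenPathSampling and the fact that OPS ensembles can be called directly on a trajectory), one could verify the result explicitly:
goal_ensemble = paths.PartInXEnsemble(C_7eq) & paths.PartInXEnsemble(alpha_R)
complement = paths.AllOutXEnsemble(C_7eq) | paths.AllOutXEnsemble(alpha_R)
print goal_ensemble(trajectory)   # expected: True, the trajectory visits both states
print complement(trajectory)      # expected: False, it no longer satisfies the complement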
End of explanation
"""
plt.plot(phi(trajectory), psi(trajectory), 'k.-')
plt.plot(phi(subtrajectories[0]), psi(subtrajectories[0]), 'r')
"""
Explanation: Plotting the trajectory
End of explanation
"""
integrator = VVVRIntegrator(
300*unit.kelvin,
1.0/unit.picoseconds,
2.0*unit.femtoseconds
)
integrator.setConstraintTolerance(0.00001)
engine = peng_omm.Engine(
template.topology,
system,
integrator,
openmm_properties=openmm_properties,
options=engine_options
)
engine.name = '300K'
"""
Explanation: Setting up another engine
We'll create another engine that uses a 300K integrator, and equilibrate to a 300K path from the 500K path.
End of explanation
"""
network = paths.TPSNetwork(initial_states=C_7eq, final_states=alpha_R)
scheme = paths.OneWayShootingMoveScheme(network,
selector=paths.UniformSelector(),
engine=engine)
# make subtrajectories into initial conditions (trajectories become a sampleset)
initial_conditions = scheme.initial_conditions_from_trajectories(subtrajectories)
# check that initial conditions are valid and complete (raise AssertionError otherwise)
scheme.assert_initial_conditions(initial_conditions)
sampler = paths.PathSampling(storage=paths.Storage("tps_nc_files/alanine_dipeptide_tps_equil.nc", "w", template),
move_scheme=scheme,
sample_set=initial_conditions)
sampler.live_visualizer = paths.StepVisualizer2D(network, phi, psi, [-3.14, 3.14], [-3.14, 3.14])
# initially, these trajectories are correlated (actually, identical)
# once decorrelated, we have a (somewhat) reasonable 300K trajectory
initial_conditions[0].trajectory.is_correlated(sampler.sample_set[0].trajectory)
# this is a trick to take the first decorrelated trajectory
while (initial_conditions[0].trajectory.is_correlated(sampler.sample_set[0].trajectory)):
sampler.run(1)
# run an extra 10 steps to decorrelate a little further
sampler.run(10)
"""
Explanation: Equilibrate TPS
This is, again, a simple path sampling setup. We use the same TPSNetwork we'll use later, and only shooting moves. Once the initial conditions are correctly set up, we run one step at a time until the initial trajectory is decorrelated.
This setup of a path sampler always consists of defining a network and a move_scheme. See toy model notebooks for further discussion.
End of explanation
"""
|
Apipie/apipie-rails
|
rel-eng/gem_release.ipynb
|
apache-2.0
|
%autosave 0
%cd ..
"""
Explanation: Release of apipie-rails gem
Requirements
push access to https://github.com/Apipie/apipie-rails
push access to rubygems.org for apipie-rails
sudo yum install python-slugify asciidoc
ensure that neither git push nor gem push requires interactive auth. If you can't use an API key or SSH key to authenticate, skip these steps and run them from the shell manually
ensure all checks have passed on the branch you're about to release
Release process
Follow the steps with <Shift>+<Enter> or <Ctrl>+<Enter>,<Down>
If anything fails, fix it and re-run the step if applicable
Release settings
End of explanation
"""
NEW_VERSION = '0.5.20'
LAST_VERSION = '0.5.19'
GIT_REMOTE_UPSTREAM = 'origin'
STABLE_RELEASE = False
WORK_BRANCH = 'stable' if STABLE_RELEASE else 'master'
# Array of strings, e.g. ["21cbsc214g3", "21casc214g3"]
CHERRY_PICKS = []
GEMFILE='Gemfile.rails61'
"""
Explanation: Update the following notebook settings
End of explanation
"""
! git checkout {WORK_BRANCH}
! git fetch {GIT_REMOTE_UPSTREAM}
! git rebase {GIT_REMOTE_UPSTREAM}/{WORK_BRANCH}
"""
Explanation: Ensure the repo is up to date
End of explanation
"""
if STABLE_RELEASE:
for cp in CHERRY_PICKS:
! git cherry-pick -x {cp}
"""
Explanation: Cherry picks for stable release
End of explanation
"""
! BUNDLE_GEMFILE=gemfiles/{GEMFILE} bundle update
! BUNDLE_GEMFILE=gemfiles/{GEMFILE} bundle exec rspec
"""
Explanation: Run the tests locally if your setup allows it; otherwise, ensure the HEAD is green
End of explanation
"""
! sed -i 's/VERSION = .*/VERSION = "{NEW_VERSION}"/' lib/apipie/version.rb
# Parse git changelog
from IPython.display import Markdown as md
from subprocess import check_output
from shlex import split
import re
def format_log_entry(entry):
author = re.search(r'author:(.*)', entry).group(1)
entry = re.sub(r'author:(.*)', '', entry)
entry = re.sub(r'([fF]ixes|[rR]efs)[^-]*-\s*(.*)', r'\2', entry)
entry = '* ' + entry.capitalize()
entry = re.sub(r'\(#([0-9]+)\)', r'[#\1](https://github.com/Apipie/apipie-rails/pull/\1)', entry)
entry = entry + f'({author})'
return entry
def skip(entry):
if re.match(r'Merge pull', entry) or \
re.match(r'^i18n', entry) or \
re.match(r'^Bump to version', entry):
return True
else:
return False
git_log_cmd = 'git log --pretty=format:"%%s author:%%an" v%s..HEAD' % LAST_VERSION
log = check_output(split(git_log_cmd)).decode('utf8').split('\n')
change_log = [format_log_entry(e) for e in log if not skip(e)]
md('\n'.join(change_log))
# Write release notes
from datetime import datetime
import fileinput
import sys
fh = fileinput.input('CHANGELOG.md', inplace=True)
for line in fh:
print(line.rstrip())
if re.match(r'========', line):
print('## [v%s](https://github.com/Apipie/apipie-rails/tree/v%s) (%s)' % (NEW_VERSION, NEW_VERSION, datetime.today().strftime('%Y-%m-%d')))
print('[Full Changelog](https://github.com/Apipie/apipie-rails/compare/v%s...v%s)' % (LAST_VERSION, NEW_VERSION))
for entry in change_log:
print(entry)
print('')
fh.close()
"""
Explanation: Update release related stuff
End of explanation
"""
! git add -u
! git status
! git diff --cached
"""
Explanation: Manual step: Update deps in the gemspec if necessary
Check what is going to be committed
End of explanation
"""
! git commit -m "Bump to {NEW_VERSION}"
"""
Explanation: Commit changes
End of explanation
"""
! git tag {NEW_VERSION}
"""
Explanation: Tag new version
End of explanation
"""
! BUNDLE_GEMFILE=gemfiles/{GEMFILE} bundle exec rake build
! gem push pkg/apipie-rails-{NEW_VERSION}.gem
"""
Explanation: Build the gem
End of explanation
"""
! git push {GIT_REMOTE_UPSTREAM} {WORK_BRANCH}
! git push --tags {GIT_REMOTE_UPSTREAM} {WORK_BRANCH}
"""
Explanation: Push the changes upstream if everything is correct
End of explanation
"""
print('\n')
print('\n'.join(change_log))
print('\n\nhttps://github.com/Apipie/apipie-rails/releases/new?tag=%s' % NEW_VERSION)
"""
Explanation: Now the new release is in the upstream repo
Some manual steps follow to improve the UX
New release on GitHub
Copy the following changelog lines into the description field of the form at the link below
The release title is the new version.
End of explanation
"""
|
mangecoeur/pineapple
|
data/examples/python2.7/Execution.ipynb
|
gpl-3.0
|
def f(x):
return 1.0 / x
def g(x):
return x - 1.0
f(g(1.0))
"""
Explanation: Executing Code
In this notebook we'll look at some of the issues surrounding executing
code in the notebook.
Backtraces
When you interrupt a computation, or if an exception is raised but not
caught, you will see a backtrace of what was happening when the program
halted. The backtrace is color highlighted to help you find the information
you need to debug the problem.
End of explanation
"""
%pdb on
f(g(1.0))
"""
Explanation: Python Debugging
You can also turn on the Python debugger inside a notebook using the
magic invocation %pdb on. When an exception occurs, the debugger
will activate inside the output cell. You can then type commands
and see responses from the stopped state of the program.
Some commands:
- h help
- w print stack trace
- p expr print expressions
- q quit
- r restart
Full documentation on the debugger can be found at Python debugger pdb.
End of explanation
"""
import sys
print('Hello, world!')
sys.stdout.write('We meet again, stdout.')
sys.stderr.write('Error, you appear to have created a black hole.')
"""
Explanation: Output
Normal output is shown after the In[] area. Output written to stdout is shown in one color,
while output written to stderr is shown with a red background.
End of explanation
"""
import time
for i in range(10):
print(i)
time.sleep(0.5)
"""
Explanation: Asynchronous Output
Output written to stdout and stderr shows up immediately in the notebook; you don't have
to wait for the evaluation to finish before you see anything. Here is a demo.
End of explanation
"""
import threading
class SummingThread(threading.Thread):
def __init__(self, low, high):
super(SummingThread, self).__init__()
self.low = low
self.high = high
self.total = 0
def run(self):
for i in range(self.low, self.high):
self.total += i
def sequential_sum(n):
total = 0
for i in range(0, n):
total += i
return total
def parallel_sum(n):
thread1 = SummingThread(0, n/2)
thread2 = SummingThread(n/2, n)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
return thread1.total + thread2.total
%timeit sequential_sum(100000)
%timeit parallel_sum(100000)
"""
Explanation: Threads
You can start multiple threads and use the standard Python threading libraries such as
thread and threading to coordinate between them.
Note that because of the global interpreter lock in CPython two threads
with work to do will never run at the same time.
End of explanation
"""
from time import sleep
from multiprocessing import Pool
def f(p):
low, high = p
total = 0
for i in range(low, high):
total += i
return total
def sequential_sum(n):
total = 0
for i in range(0, n):
total += i
return total
def parallel_sum(n):
p = Pool(2)
results = p.map(f, [[0, n/2], [n/2, n]])
return results[0] + results[1]
if __name__ == "__main__":
%timeit sequential_sum(1000)
%timeit parallel_sum(1000)
"""
Explanation: Multiprocessing
It is possible to use the multiprocessing library inside Pineapple notebooks. The multiprocessing library spawns multiple interpreters
which can actually run in parallel. Of course this is still no guarantee
of higher performance.
End of explanation
"""
|
balarsen/pymc_learning
|
updating_info/Arb_dist.ipynb
|
bsd-3-clause
|
# pymc3.distributions.DensityDist?
import matplotlib.pyplot as plt
import matplotlib as mpl
from pymc3 import Model, Normal, Slice
from pymc3 import sample
from pymc3 import traceplot
from pymc3.distributions import Interpolated
from theano import as_op
import theano.tensor as tt
import numpy as np
from scipy import stats
%matplotlib inline
%load_ext version_information
%version_information pymc3
from sklearn.neighbors.kde import KernelDensity
import numpy as np
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
kde = KernelDensity(kernel='gaussian', bandwidth=0.2).fit(X)
kde.score_samples(X)
plt.scatter(X[:,0], X[:,1])
"""
Explanation: Use an arbitrary distribution
NOTE: this requires PyMC3 3.1
pymc3.distributions.DensityDist
End of explanation
"""
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha_true = 5
beta0_true = 7
beta1_true = 13
# Size of dataset
size = 100
# Predictor variable
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
# Simulate outcome variable
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
"""
Explanation: Generating data
End of explanation
"""
basic_model = Model()
with basic_model:
# Priors for unknown model parameters
alpha = Normal('alpha', mu=0, sd=1)
beta0 = Normal('beta0', mu=12, sd=1)
beta1 = Normal('beta1', mu=18, sd=1)
# Expected value of outcome
mu = alpha + beta0 * X1 + beta1 * X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=1, observed=Y)
# draw 1000 posterior samples
trace = sample(1000)
traceplot(trace);
"""
Explanation: Model specification
Our initial beliefs about the parameters are quite informative (sd=1) and a bit off the true values.
End of explanation
"""
def from_posterior(param, samples):
smin, smax = np.min(samples), np.max(samples)
width = smax - smin
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
# what was never sampled should have a small probability but not 0,
# so we'll extend the domain and use linear approximation of density on it
x = np.concatenate([[x[0] - 3 * width], x, [x[-1] + 3 * width]])
y = np.concatenate([[0], y, [0]])
return Interpolated(param, x, y)
"""
Explanation: In order to update our beliefs about the parameters, we use the posterior distributions, which will be used as the prior distributions for the next inference. The data used for each inference iteration has to be independent from the previous iterations, otherwise the same (possibly wrong) belief is injected over and over in the system, amplifying the errors and misleading the inference. By ensuring the data is independent, the system should converge to the true parameter values.
Because we draw samples from the posterior distribution (shown on the right in the figure above), we need to estimate their probability density (shown on the left in the figure above). Kernel density estimation (KDE) is a way to achieve this, and we will use this technique here. In any case, it is an empirical distribution that cannot be expressed analytically. Fortunately PyMC3 provides a way to use custom distributions, via Interpolated class.
End of explanation
"""
traces = [trace]
for _ in range(10):
# generate more data
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
model = Model()
with model:
# Priors are posteriors from previous iteration
alpha = from_posterior('alpha', trace['alpha'])
beta0 = from_posterior('beta0', trace['beta0'])
beta1 = from_posterior('beta1', trace['beta1'])
# Expected value of outcome
mu = alpha + beta0 * X1 + beta1 * X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=1, observed=Y)
        # draw 1000 posterior samples
trace = sample(1000)
traces.append(trace)
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['alpha', 'beta0', 'beta1']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'alpha': alpha_true, 'beta0': beta0_true, 'beta1': beta1_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.show()
"""
Explanation: Now we just need to generate more data and build our Bayesian model so that the prior distributions for the current iteration are the posterior distributions from the previous iteration. It is still possible to continue using the NUTS sampling method, because the Interpolated class implements the calculation of the gradients that are necessary for Hamiltonian Monte Carlo samplers.
End of explanation
"""
for _ in range(10):
# generate more data
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
model = Model()
with model:
# Priors are posteriors from previous iteration
alpha = from_posterior('alpha', trace['alpha'])
beta0 = from_posterior('beta0', trace['beta0'])
beta1 = from_posterior('beta1', trace['beta1'])
# Expected value of outcome
mu = alpha + beta0 * X1 + beta1 * X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=1, observed=Y)
        # draw 1000 posterior samples
trace = sample(1000)
traces.append(trace)
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['alpha', 'beta0', 'beta1']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'alpha': alpha_true, 'beta0': beta0_true, 'beta1': beta1_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.show()
for _ in range(10):
# generate more data
X1 = np.random.randn(size)
X2 = np.random.randn(size) * 0.2
Y = alpha_true + beta0_true * X1 + beta1_true * X2 + np.random.randn(size)
model = Model()
with model:
# Priors are posteriors from previous iteration
alpha = from_posterior('alpha', trace['alpha'])
beta0 = from_posterior('beta0', trace['beta0'])
beta1 = from_posterior('beta1', trace['beta1'])
# Expected value of outcome
mu = alpha + beta0 * X1 + beta1 * X2
# Likelihood (sampling distribution) of observations
Y_obs = Normal('Y_obs', mu=mu, sd=1, observed=Y)
        # draw 1000 posterior samples
trace = sample(1000)
traces.append(trace)
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['alpha', 'beta0', 'beta1']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'alpha': alpha_true, 'beta0': beta0_true, 'beta1': beta1_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.show()
"""
Explanation: You can re-execute the last two cells to generate more updates.
What is interesting to note is that the posterior distributions for our parameters tend to get centered on their true values (vertical lines), and the distributions get thinner and thinner. This means that we get more confident each time, and the (false) belief we had at the beginning gets flushed away by the new data we incorporate.
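A quick way to quantify this narrowing (a small sketch reusing the traces list and the numpy import already available above):
for param in ['alpha', 'beta0', 'beta1']:
    stds = [np.std(t[param]) for t in traces]
    print('%s posterior std per update: %s' % (param, ', '.join('%.3f' % s for s in stds)))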
End of explanation
"""
|
leriomaggio/numpy_euroscipy2015
|
01_numpy_basics.ipynb
|
mit
|
import numpy as np # naming import convention
"""
Explanation: What is Numpy
NumPy is the fundamental package for scientific computing with Python.
It is a package that provides high-performance vector, matrix and higher-dimensional data structures for Python.
It is implemented in C and Fortran so when calculations are vectorized, performance is very good.
So, in a nutshell:
a powerful Python extension for N-dimensional array
a tool for integrating C/C++ and Fortran code
designed for scientific computation: linear algebra and Signal Analysis
If you are a MATLAB® user I do recommend reading Numpy for MATLAB Users.
I'm a supporter of the Open Science Movement, thus I humbly suggest you take a look at the Science Code Manifesto
Getting Started with Numpy Arrays
NumPy's main object is the homogeneous multidimensional array. It is a table of elements (usually numbers), all of the same type.
In Numpy dimensions are called axes.
The number of axes is called rank.
The most important attributes of an ndarray object are:
ndarray.ndim - the number of axes (dimensions) of the array.
ndarray.shape - the dimensions of the array. For a matrix with n rows and m columns, shape will be (n,m).
ndarray.size - the total number of elements of the array.
ndarray.dtype - numpy.int32, numpy.int16, and numpy.float64 are some examples.
ndarray.itemsize - the size in bytes of each element of the array. For example, elements of type float64 have an itemsize of 8 (=64/8)
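A quick sketch of these attributes (once numpy has been imported as np, as done in this notebook; the array name demo is only for illustration):
demo = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
print(demo.ndim)      # 2 axes
print(demo.shape)     # (2, 3)
print(demo.size)      # 6 elements in total
print(demo.dtype)     # float64
print(demo.itemsize)  # 8 bytes per element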
To use numpy we need to import the module, for example:
End of explanation
"""
np.array?
"""
Explanation: Terminology Assumption
In the numpy package the terminology used for vectors, matrices and higher-dimensional data sets is array.
Reference Documentation
On the web: http://docs.scipy.org/
Interactive help:
End of explanation
"""
r = range(10)
print(list(r))
print(type(r)) # NOTE: if this print will return a <type 'list'> it means you're using Py2.7
"""
Explanation: If you're looking for something
Creating numpy arrays
Get acquainted with NumPy
Let's start by creating some numpy.array objects in order to get our hands into the very details of numpy basic data structure.
NumPy is a very flexible library, and provides many ways to create (and initialize) new numpy arrays.
One way is using specific functions dedicated to generate numpy arrays
(usually, array of numbers)[+]
[+] More on data types, later on !-)
First numpy array example: array of numbers
NumPy provides many functions to generate arrays with specific properties (e.g. size or shape).
We will see later examples in which we will generate ndarrays from explicit Python lists.
However, for larger arrays, using Python lists is simply impractical.
np.arange
In standard Python, we use the range function to generate an iterable object of integers within a specific range (at a specified step, default: 1)
End of explanation
"""
ra = np.arange(10)
print(ra)
print(type(ra))
"""
Explanation: Similarly, in numpy there is the arange function which instead generates a numpy.ndarray
End of explanation
"""
# floating point step-wise range generatation
raf = np.arange(-1, 1, 0.1)
print(raf)
"""
Explanation: However, we are working with the Numerical Python library, so we should expect more when it comes to numbers.
In fact, we can create an array over a floating-point range with a fractional step:
End of explanation
"""
print(f"dtype of 'ra': {ra.dtype}, dtype of 'raf': {raf.dtype}")
"""
Explanation: Properties of numpy array
Apart from the actual content, which is of course different because specified ranges are different, the ra and raf arrays differ by their dtype:
End of explanation
"""
ra.itemsize # bytes per element
ra.nbytes # number of bytes
ra.ndim # number of dimensions
ra.shape # shape, i.e. number of elements per-dimension/axis
## please replicate the same set of operations here for `raf`
# your code here
"""
Explanation: More properties of the numpy array
End of explanation
"""
np.linspace(0, 10, 20)
np.logspace(0, np.e**2, 10, base=np.e)
"""
Explanation: Q: Do you notice any relevant difference?
np.linspace and np.logspace
Like np.arange, in numpy there are other two "similar" functions:
np.linspace
np.logspace
Looking at the examples below, can you spot the difference?
End of explanation
"""
# uniform random numbers in [0,1]
ru = np.random.rand(10)
ru
"""
Explanation: Random Number Generation
np.random.rand & np.random.randn
End of explanation
"""
# standard normal distributed random numbers
rs = np.random.randn(10)
rs
"""
Explanation: Note: numbers and the content of the array may vary
End of explanation
"""
Z = np.zeros((3,3))
print(Z)
O = np.ones((3, 3))
print(O)
E = np.empty(10)
print(E)
# TRY THIS!
np.empty(9)
"""
Explanation: Note: numbers and the content of the array may vary
Q: What if I ask you to generate random numbers in a way that we both obtain the very same numbers? (Provided we share the same CPU architecture)
Zeros and Ones (or Empty)
np.zeros, np.ones, np.empty
Sometimes it may be required to initialise arrays of zeros, or of all ones or finally just rubbish (i.e. empty) of a specific shape:
End of explanation
"""
# a diagonal matrix
np.diag([1,2,3])
# diagonal with offset from the main diagonal
np.diag([1,2,3], k=1)
"""
Explanation: Other specialised Functions
Diagonal Matrices
1. np.diag
End of explanation
"""
# a diagonal matrix with ones on the main diagonal
np.eye(3, dtype='int') # 3 is the size of the (square) identity matrix
"""
Explanation: Identity Matrix $\mathrm{I} \mapsto$ np.eye
End of explanation
"""
v = np.array([1,2,3,4])
v
print(type(v))
"""
Explanation: Create numpy.ndarray from list
To create new vector or matrix arrays from Python lists we can use the
numpy.array constructor function:
End of explanation
"""
v = np.asarray([1, 2, 3, 4])
v
print(type(v))
"""
Explanation: Alternatively there is also the np.asarray function, which easily converts a Python list into a numpy array:
End of explanation
"""
M = np.array([[1, 2], [3, 4]])
M
v.shape, M.shape
"""
Explanation: We can use the very same strategy for higher-dimensional arrays.
E.g. Let's create a matrix from a list of lists:
End of explanation
"""
L = range(100000)
%timeit [i**2 for i in L]
a = np.arange(100000)
%timeit a**2 # This operation is called Broadcasting - more on this later!
%timeit [element**2 for element in a]
"""
Explanation: So, why is it useful then?
So far the numpy.ndarray looks awfully much like a Python list (or nested list).
Why not simply use Python lists for computations instead of creating a new array type?
There are several reasons:
Python lists are very general.
They can contain any kind of object.
They are dynamically typed.
They do not support mathematical functions such as matrix and dot multiplications, etc.
Implementing such functions for Python lists would not be very efficient because of the dynamic typing.
Numpy arrays are statically typed and homogeneous.
The type of the elements is determined when array is created.
Numpy arrays are memory efficient.
Because of the static typing, fast implementation of mathematical functions such as multiplication and addition of numpy arrays can be implemented in a compiled language (C and Fortran is used).
End of explanation
"""
a = np.arange(45)
a
a.shape
A = a.reshape(9, 5)
A
n, m = A.shape
B = A.reshape((1,n*m))
B
"""
Explanation: Exercises: DIY
Simple arrays
Create simple one and two dimensional arrays. First, redo the examples
from above. And then create your own.
Use the functions len, shape and ndim on some of those arrays and
observe their output.
Creating arrays using functions
Experiment with arange, linspace, ones, zeros, eye and diag.
Create different kinds of arrays with random numbers.
Try setting the seed before creating an array with random values
hint: use np.random.seed
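For reference, a minimal sketch of the seeding hint: fixing the seed makes the pseudo-random draws reproducible.
np.random.seed(42)
print(np.random.rand(3))
np.random.seed(42)
print(np.random.rand(3))  # same seed, same numbers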
Numpy Array Object
NumPy has a multidimensional array object called ndarray. It consists of two parts as follows:
The actual data
Some metadata describing the data
The majority of array operations leave the raw data untouched. The only aspect that changes is the metadata.
<img src="images/ndarray_with_details.png" />
Data vs Metadata (Attributes)
This internal separation between the actual data (i.e. the content of the array --> the memory) and the metadata (i.e. the properties and attributes of the data) allows, among other things, for efficient memory management.
For example, the shape of a NumPy array can be modified without copying or affecting the actual data, which makes reshaping a fast operation even for large arrays.
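A small sketch of this, reusing the arrays defined above: A is only a view on a's data buffer, so only the metadata differs.
A[0, 0] = 999
print(a[0])         # 999: the original array sees the change
print(A.base is a)  # True: A shares a's memory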
End of explanation
"""
A = np.array([[1, 2, 3], [4, 5, 6]])
A.ravel()
"""
Explanation: Q: What is the difference (in terms of shape) between B and the original a?
Flattening
Another (quite common) reshaping operation you will end up performing on n-dimensional arrays is flattening.
Flattening means collapsing all the axis into a unique one
np.ravel
numpy.ndarray objects have a ravel method that generates a new version of the array as a 1D vector.
Also this time, the original memory is unaffected, and a pointer with different metadata is returned.
End of explanation
"""
A.ravel('F') # order F (Fortran) is column-major, C (default) row-major
"""
Explanation: By default, np.ravel performs the operation row-wise, à la C. Numpy also supports a Fortran-style order of indices (i.e. column-major indexing)
End of explanation
"""
A.T
A.T.ravel()
"""
Explanation: Alternatively, we can use the ndarray.flatten method to turn a higher-dimensional array into a vector. But this method creates a copy of the data.
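A minimal sketch of the difference, using the 2x3 array A defined above:
F = A.flatten()  # always returns a copy of the data
R = A.ravel()    # returns a view whenever possible (A is C-contiguous here)
print(np.may_share_memory(A, F))  # False: F owns its own copy
print(np.may_share_memory(A, R))  # True: R reuses A's memory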
Transpose
Similarly, we can transpose a matrix
End of explanation
"""
A = np.arange(20).reshape(10, 2)
A = A[np.newaxis, ...] # this is called ellipsis
print(A.shape)
"""
Explanation: Introducing np.newaxis
In addition to shape, we can also manipulate the axis of an array.
(1) We can always add as many axis as we want:
End of explanation
"""
A = A.swapaxes(0, 2) # swap axis 0 with axis 2 --> new shape: (2, 10, 1)
print(A.shape)
"""
Explanation: (2) We can also permute axes:
End of explanation
"""
a = np.arange(10)
print(a)
print(a.dtype)
au = np.arange(10, dtype=np.uint8)
print(au)
print(au.dtype)
"""
Explanation: Again, changing and manipulating the axes will not touch the memory; it will just change the parameters (i.e. strides and offset) used to navigate the data.
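A small sketch of those parameters (the exact stride values depend on the platform's default integer size):
print(A.shape)             # (2, 10, 1) after the swap above
print(A.strides)           # byte steps per axis; swapping axes just permutes these numbers
print(A.base is not None)  # True: A is still a view on the original data buffer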
Numerical Types and Precision
In NumPy, talking about int or float does not make "real sense". This is mainly for two reasons:
(a) int or float are assumed to be at the maximum precision available on your machine (presumably int64 and
float64, respectively).
(b) Different precision imply different numerical ranges, and so different memory size (i.e. number of bytes required to represent all the numbers in the corresponding numerical range).
Numpy support the following numerical types:
bool | This stores boolean (True or False) as a bit
int0 | This is a platform integer (normally either int32 or int64)
int8 | This is an integer ranging from -128 to 127
int16 | This is an integer ranging from -32768 to 32767
int32 | This is an integer ranging from -2 ** 31 to 2 ** 31 -1
int64 | This is an integer ranging from -2 ** 63 to 2 ** 63 -1
uint8 | This is an unsigned integer ranging from 0 to 255
uint16 | This is an unsigned integer ranging from 0 to 65535
uint32 | This is an unsigned integer ranging from 0 to 2 ** 32 - 1
uint64 | This is an unsigned integer ranging from 0 to 2 ** 64 - 1
float16 | This is a half precision float with sign bit, 5 bits exponent, and 10 bits mantissa
float32 | This is a single precision float with sign bit, 8 bits exponent, and 23 bits mantissa
float64 or float | This is a double precision float with sign bit, 11 bits exponent, and 52 bits mantissa
complex64 | This is a complex number represented by two 32-bit floats (real and imaginary components)
complex128 | This is a complex number represented by two 64-bit floats (real and imaginary components)
(or complex)
Numerical Types and Representation
The numerical dtype of an array should be selected very carefully, as it directly affects the numerical representation of elements, that is:
the number of bytes used;
the numerical range
We can always specify the dtype of an array when we create one. If we do not, the dtype of the array will be inferred, namely np.int_ or np.float_ depending on the case.
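A minimal sketch of that inference:
print(np.array([1, 2, 3]).dtype)    # an integer dtype (platform dependent, e.g. int64)
print(np.array([1.0, 2, 3]).dtype)  # float64 as soon as a float is present
print(np.array([1, 2, 3], dtype=np.float32).dtype)  # or force the dtype explicitly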
End of explanation
"""
x = np.zeros(4, 'int8') # Integer ranging from -128 to 127
x
"""
Explanation: So, then: What happens if I try to represent a number that is Out of range?
Let's have a go with integers, i.e., int8 and uint8
End of explanation
"""
x[0] = 127
x
x[0] = 128
x
x[1] = 129
x
x[2] = 257 # i.e. (128 x 2) + 1
x
ux = np.zeros(4, 'uint8') # Integer ranging from 0 to 255, dtype also as string!
ux
ux[0] = 255
ux[1] = 256
ux[2] = 257
ux[3] = 513 # (256 x 2) + 1
ux
"""
Explanation: Spoiler Alert: very simple example of indexing in NumPy
Well...it works as expected, doesn't it?
End of explanation
"""
np.iinfo(np.int32)
np.finfo(np.float16)
"""
Explanation: Machine Info and Supported Numerical Representation
Numpy provides two functions to inspect the information of supported integer and floating-point types, namely np.iinfo and np.finfo:
End of explanation
"""
machine_info = np.MachAr()
machine_info.epsilon
machine_info.huge
np.finfo(np.float64).max == machine_info.huge
# TRY THIS!
help(machine_info)
"""
Explanation: In addition, the MachAr class will provide information on the current machine :
End of explanation
"""
a = np.arange(7, dtype=np.uint16)
print('a itemsize: ', a.itemsize)
print('a.dtype.itemsize: ', a.dtype.itemsize)
"""
Explanation: Data Type Object
Data type objects are instances of the numpy.dtype class.
Once again, arrays have a data type.
<br>
To be precise, every element in a NumPy array has the same data type.
The data type object can tell you the size of the data in bytes.
<br>
(Recall: The size in bytes is given by the itemsize attribute of the dtype class)
End of explanation
"""
np.dtype(float)
np.dtype('f')
np.dtype('d')
np.dtype('f8')
np.dtype('U10') # Unicode string of up to 10 chars
"""
Explanation: Character Codes
Character codes are included for backward compatibility with Numeric.
<br>
Numeric is the predecessor of NumPy. Their use is not recommended, but these codes pop up in several places.
By the way, you should instead use the dtype objects.
integer i
Unsigned integer u
Single precision float f
Double precision float d
bool b
complex D
string S
unicode U
dtype constructors
End of explanation
"""
rt = np.dtype([('name', np.str_, 40), ('numitems', np.int32), ('price', np.float32)])
rt['name'] # see the difference with Python 2
rt['numitems']
rt['price']
"""
Explanation: Note: A listing of all data type names can be found by calling np.sctypeDict.keys()
Custom dtype
We can use the np.dtype constructor to create a custom record type.
End of explanation
"""
record_items = np.array([('Meaning of life DVD', 42, 3.14), ('Butter', 13, 2.72)],
dtype=rt)
print(record_items)
"""
Explanation: Instantiate an array of dtype equal to rt (the record type)
End of explanation
"""
|
gale320/flexx
|
examples/notebooks/EuroScipy 2015 demo.ipynb
|
bsd-2-clause
|
from flexx.webruntime import launch
rt = launch('http://flexx.rtfd.org', 'xul', title='Test title')
"""
Explanation: This is the demo that I used during the EuroScipy 2015 talk on Flexx.
flexx.webruntime
Launch a web runtime. Can be a browser or something that looks like a desktop app.
End of explanation
"""
from flexx.pyscript import py2js
print(py2js('square = lambda x: x**2'))
def foo(n):
res = []
for i in range(n):
res.append(i**2)
return res
print(py2js(foo))
def foo(n):
return [i**2 for i in range(n)]
print(py2js(foo))
"""
Explanation: flexx.pyscript
End of explanation
"""
from flexx import react
@react.input
def name(n='john doe'):
if not isinstance(n, str):
raise ValueError('Name must be a string')
return n.capitalize()
name
@react.connect('name')
def greet(n):
print('hello %s' % n)
name("almar klein")
"""
Explanation: flexx.react
Reactive programming uses signals to communicate between different components of an app, and provides easy ways to react to changes in the values of these signals.
The API for flexx.react consists of a few decorators to turn functions into signals. One signal is the input signal.
End of explanation
"""
@react.connect('first_name', 'last_name')
def greet(first, last):
print('hello %s %s!' % (first, last))
"""
Explanation: A signal can have multiple upstream signals.
End of explanation
"""
class Person(react.HasSignals):
@react.input
def father(f):
assert isinstance(f, Person)
return f
@react.connect('father.last_name')
def last_name(s):
return s
@react.connect('children.*.name')
def child_names(*names):
        return ', '.join(names)
"""
Explanation: Dynamism provides great flexibility
End of explanation
"""
from flexx import app, react
app.init_notebook()
class Greeter(app.Pair):
@react.input
def name(s):
return str(s)
class JS:
@react.connect('name')
def _greet(name):
alert('Hello %s!' % name)
greeter = Greeter()
greeter.name('John')
"""
Explanation: flexx.app
End of explanation
"""
|
CopernicusMarineInsitu/INSTACTraining
|
PythonNotebooks/PlatformPlots/Read_TimeSeries_3.ipynb
|
mit
|
%matplotlib inline
import cf
import netCDF4
import matplotlib.pyplot as plt
"""
Explanation: Reading a file using CF module
The main difference with the previous example is the way we will read the data from the file.
Instead of the netCDF4 module, we will use the cf-python package, which implements the CF data model for the reading, writing and processing of data and metadata.
End of explanation
"""
dataurl = "http://thredds.socib.es/thredds/dodsC/mooring/conductivity_and_temperature_recorder/buoy_canaldeibiza-scb_sbe37006/L1/dep0003_buoy-canaldeibiza_scb-sbe37006_L1_latest.nc"
"""
Explanation: The data file is the same.
End of explanation
"""
f = cf.read(dataurl)
print f
"""
Explanation: Read the file
We use the function read. Doing so, we easily obtain a nice summary of the file content.
End of explanation
"""
temperature = f.select('sea_water_temperature')
temperature
"""
Explanation: We see that the file contains 3 variables:
1. temperature
2. salinity
3. conductivity.
Each of them has 4 dimensions: longitude, latitude, time and depth.
Read variable, coordinates and units
From the previous commands we cannot know the names of the variables within the file. But that's not necessary. Temperature can be retrieved using its standard name:
End of explanation
"""
print len(temperature)
"""
Explanation: The number of variables which have a standard name corresponding to sea_water_temperature is:
End of explanation
"""
temperature_values = temperature[0].array
temperature_units = temperature[0].units
print temperature_values[0:20]
print 'Temperature units: ' + temperature_units
"""
Explanation: but in other cases (ex: different sensors measuring temperature with data in a common file), one can obtain more than one variable.
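In that situation, a small sketch like the following (reusing only the calls already shown in this notebook) can help inspect the candidates before picking one:
temperature_fields = f.select('sea_water_temperature')
for i in range(len(temperature_fields)):
    print i, temperature_fields[i].units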
To get the temperature values, we select the first element (index = 0 in python, not 1) and convert it into an array.
End of explanation
"""
temperature[0].coords()
"""
Explanation: We inspect the corresponding coordinates:
End of explanation
"""
time = temperature[0].coord('time')
time
"""
Explanation: To extract the time variable:
End of explanation
"""
time_values = temperature[0].coord('time').array
time_units = temperature[0].coord('time').units
print time_values[0:20]
print ' '
print 'Time units: ' + time_units
"""
Explanation: and to get the values:
End of explanation
"""
time2 = netCDF4.num2date(time_values, time_units)
plt.plot(time2, temperature_values)
plt.ylabel(temperature_units, fontsize=20)
plt.show()
"""
Explanation: A simple plot
End of explanation
"""
|
saudijack/unfpyboot
|
Day_02/02_GitDevelopment/VersionControl.ipynb
|
mit
|
ls
"""
Explanation: Version control for fun and profit: the tool you didn't know you needed. From personal workflows to open collaboration
Note: this tutorial is based on (mostly blatantly copied from), and therefore owes a lot to, the excellent materials offered in:
Fernando Perez's original notebook
That notebook owed a lot to the following (included here for completeness):
"Git for Scientists: A Tutorial" by John McDonnell
Emanuele Olivetti's lecture notes and exercises from the G-Node summer school on Advanced Scientific Programming in Python.
There is also J.R. Johansson's tutorial on version control that is also written as a fully reproducible notebook and is also aimed at a scientific audience. It has a similar spirit to this one, and is part of his excellent series Lectures on Scientific Computing with Python that is entirely available as IPython Notebooks.
If we're running short on time, we'll do the simple guide
Why should I care about Version Control?
From What Is Version Control? Why Is It Important For Due Diligence?
Version control systems are essential for any form of distributed, collaborative development. Whether it is the history of a wiki page or large software development project, the ability to track each change as it was made, and to reverse changes when necessary can make all the difference between a well managed and controlled process and an uncontrolled ‘first come, first served’ system. It can also serve as a mechanism for due diligence for software projects.
From Wikipedia
“Revision control, also known as version control, source control
or software configuration management (SCM), is the
management of changes to documents, programs, and other
information stored as computer files.”
Reproducibility?
Tracking and recreating every step of your work
In the software world: it's called Version Control!
What do (good) version control tools give you?
Peace of mind (backups)
Freedom (exploratory branching)
Collaboration (synchronization)
Git is an enabling technology: Use version control for everything
Paper writing (never get paper_v5_john_jane_final_oct22_really_final.tex by email again!)
Everyday research
Teaching (never accept an emailed homework assignment again!)
Paper Writing With Git
<!-- offline:
<img src="files/img/Files.png" width="100%">
-->
<img src="https://raw.githubusercontent.com/kialio/python-bootcamp/master/Lectures/10_GitDevelopment/files/img/Files.png" width="100%">
Annotated history of each author's workflow (and backup!)
<!-- offline:
<img src="files/fig/History.png" width="100%">
-->
<img src="https://raw.githubusercontent.com/kialio/python-bootcamp/master/Lectures/10_GitDevelopment/files/img/History.png" width="100%">
Why are we using git?
Because that's what folks were using when I came into programming... ¯\(ツ)/¯
I don't know, really. Here are some reasons though:
It's decentralized (ie. everyone has a copy of the repository).
It has multiple, secure connection methods.
You can make commits offline.
It encourages a 'commit-often' mentality.
Easy to make branches and tags.
Any more?
I am not going to be upset if you use something else, but using a system that is decentralized is important. If you need to pick, check out wikipedia.
The plan for this tutorial
This tutorial is structured in the following way: we will begin with a brief overview of key concepts you need to understand in order for git to really make sense. We will then dive into hands-on work: after a brief interlude into necessary configuration we will discuss 5 "stages of git" with scenarios of increasing sophistication and complexity, introducing the necessary commands for each stage:
Local, single-user, linear workflow
Single local user, branching
Using remotes as a single user
Remotes for collaborating in a small team
Full-contact github: distributed collaboration with large teams
In reality, this tutorial only covers stages 1-4, since for #5 there are many software development-oriented tutorials and documents of very high quality online. But most scientists start working alone with a few files or with a small team, so I feel it's important to first build the key concepts and practices based on problems scientists encounter in their everyday life, without the jargon of the software world. Once you've become familiar with 1-4, the excellent tutorials that exist about collaborating on open-source projects on github should make sense.
Very high level picture: an overview of key concepts
The commit: a snapshot of work at a point in time
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/commit_anatomy.png">
Credit: ProGit book, by Scott Chacon, CC License.
End of explanation
"""
%%bash
git config --global user.name "Jeremy S. Perkins"
git config --global user.email "jeremyshane@gmail.com"
"""
Explanation: A repository: a group of linked commits
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/threecommits.png" >
And this is pretty much the essence of Git!
First things first: git must be configured before first use
The minimal amount of configuration for git to work without pestering you is to tell it who you are:
End of explanation
"""
%%bash
# Put here your preferred editor. If this is not set, git will honor
# the $EDITOR environment variable
git config --global core.editor /usr/bin/nano # Yes, I still use nano
# On Windows Notepad will do in a pinch, I recommend Notepad++ as a free alternative
# On the mac, you can set nano or emacs as a basic option
# And while we're at it, we also turn on the use of color, which is very useful
git config --global color.ui "auto"
"""
Explanation: And how you will edit text files (it will often ask you to edit messages and other information, and thus wants to know how you like to edit your files):
End of explanation
"""
%%bash
git config --global credential.helper osxkeychain
# Set the cache to timeout after 2 hours (setting is in seconds)
git config --global credential.helper 'cache --timeout=7200'
"""
Explanation: Set git to use the credential memory cache so we don't have to retype passwords too frequently. On OSX, you should run the following (note that this requires git version 1.7.10 or newer):
End of explanation
"""
!git
"""
Explanation: Github offers in its help pages instructions on how to configure the credentials helper for Linux and Windows.
Stage 1: Local, single-user, linear workflow
Simply type git to see a full list of all the 'core' commands. We'll now go through most of these via small practical exercises:
End of explanation
"""
%%bash
rm -rf test
git init test
"""
Explanation: git init: create an empty repository
End of explanation
"""
%%bash
cd test
ls
%%bash
cd test
ls -la
%%bash
cd test
ls -l .git
"""
Explanation: Note: all these cells below are meant to be run by you in a terminal where you change once to the test directory and continue working there.
Since we are putting all of them here in a single notebook for the purposes of the tutorial, they will all be prepended with the first two lines:
%%bash
cd test
that tell IPython to do that each time. But you should ignore those two lines and type the rest of each cell yourself in your terminal.
Let's look at what git did:
End of explanation
"""
%%bash
cd test
echo "My first bit of text" > file1.txt
"""
Explanation: Now let's edit our first file in the test directory with a text editor... I'm doing it programmatically here for automation purposes, but you'd normally be editing by hand
End of explanation
"""
%%bash
cd test
git add file1.txt
"""
Explanation: git add: tell git about this new file
End of explanation
"""
%%bash
cd test
git status
"""
Explanation: We can now ask git about what happened with status:
End of explanation
"""
%%bash
cd test
git commit -a -m"This is our first commit"
"""
Explanation: git commit: permanently record our changes in git's database
For now, we are always going to call git commit either with the -a option or with specific filenames (git commit file1 file2...). This delays the discussion of an aspect of git called the index (often referred to also as the 'staging area') that we will cover later. Most everyday work in regular scientific practice doesn't require understanding the extra moving parts that the index involves, so on a first round we'll bypass it. Later on we will discuss how to use it to achieve more fine-grained control of what and how git records our actions.
End of explanation
"""
%%bash
cd test
git log
"""
Explanation: In the commit above, we used the -m flag to specify a message at the command line. If we don't do that, git will open the editor we specified in our configuration above and require that we enter a message. By default, git refuses to record changes that don't have a message to go along with them (though you can obviously 'cheat' by using an empty or meaningless string: git only tries to facilitate best practices, it's not your nanny).
git log: what has been committed so far
End of explanation
"""
%%bash
cd test
echo "And now some more text..." >> file1.txt
"""
Explanation: git diff: what have I changed?
Let's do a little bit more work... Again, in practice you'll be editing the files by hand, here we do it via shell commands for the sake of automation (and therefore the reproducibility of this tutorial!)
End of explanation
"""
%%bash
cd test
git diff
"""
Explanation: And now we can ask git what is different:
End of explanation
"""
%%bash
cd test
git commit -a -m"I have made great progress on this critical matter."
"""
Explanation: The cycle of git virtue: work, commit, work, commit, ...
End of explanation
"""
%%bash
cd test
git log
"""
Explanation: git log revisited
First, let's see what the log shows us now:
End of explanation
"""
%%bash
cd test
git log --oneline --topo-order --graph
"""
Explanation: Sometimes it's handy to see a very summarized version of the log:
End of explanation
"""
%%bash
cd test
# We create our alias (this saves it in git's permanent configuration file):
git config --global alias.slog "log --oneline --topo-order --graph"
# And now we can use it
git slog
"""
Explanation: Git supports aliases: new names given to command combinations. Let's make this handy shortlog an alias, so we only have to type git slog and see this compact log:
End of explanation
"""
%%bash
cd test
git mv file1.txt file-newname.txt
git status
"""
Explanation: git mv and rm: moving and removing files
While git add is used to add files to the list git tracks, we must also tell it if we want their names to change or for it to stop tracking them. In familiar Unix fashion, the mv and rm git commands do precisely this:
End of explanation
"""
%%bash
cd test
git commit -a -m"I like this new name better"
echo "Let's look at the log again:"
git slog
"""
Explanation: Note that these changes must be committed too, to become permanent! In git's world, until something has been committed, it isn't permanently recorded anywhere.
End of explanation
"""
%%bash
cd test
git status
ls
"""
Explanation: And git rm works in a similar fashion.
Local user, branching
What is a branch? Simply a label for the 'current' commit in a sequence of ongoing commits:
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/masterbranch.png" >
There can be multiple branches alive at any point in time; the working directory reflects the state of a special pointer called HEAD. In this example there are two branches, master and testing, and testing is the currently active branch since it's what HEAD points to:
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/HEAD_testing.png" >
Once new commits are made on a branch, HEAD and the branch label move with the new commits:
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/branchcommit.png" >
This allows the history of both branches to diverge:
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/mergescenario.png" >
But based on this graph structure, git can compute the necessary information to merge the divergent branches back and continue with a unified line of development:
<!-- offline:

-->
<img src="https://raw.github.com/fperez/reprosw/master/fig/mergeaftermath.png" >
Let's now illustrate all of this with a concrete example. Let's get our bearings first:
End of explanation
"""
%%bash
cd test
git branch experiment
git checkout experiment
%%bash
cd test
echo "Some crazy idea" > experiment.txt
git add experiment.txt
git commit -a -m"Trying something new"
git slog
%%bash
cd test
git checkout master
git slog
%%bash
cd test
git status
%%bash
cd test
echo "fixed this little bug" >> file-newname.txt
git commit -a -m"The mainline keeps moving"
git slog
%%bash
cd test
ls
%%bash
cd test
git merge experiment
git slog
"""
Explanation: We are now going to try two different routes of development: on the master branch we will add one file and on the experiment branch, which we will create, we will add a different one. We will then merge the experimental branch into master.
End of explanation
"""
%%bash
cd test
ls
echo "Let's see if we have any remote repositories here:"
git remote -v
"""
Explanation: Using remotes as a single user
We are now going to introduce the concept of a remote repository: a pointer to another copy of the repository that lives in a different location. This can be simply a different path on the filesystem or a server on the internet.
For this discussion, we'll be using remotes hosted on the GitHub.com service, but you can equally use other services like BitBucket or Gitorious as well as host your own.
End of explanation
"""
%%bash
cd test
git remote add origin https://github.com/kialio/test.git
%%bash
cd test
git push -u origin master
"""
Explanation: Since the above cell didn't produce any output after the git remote -v call, it means we have no remote repositories configured. We will now proceed to do so. Once logged into GitHub, go to the new repository page and make a repository called test. Do not check the box that says Initialize this repository with a README, since we already have an existing repository here. That option is useful when you're starting out on GitHub and don't already have a repo on a local computer.
We can now follow the instructions from the next page:
End of explanation
"""
%%bash
cd test
git remote -v
"""
Explanation: Let's see the remote situation again:
End of explanation
"""
%%bash
# Here I clone my 'test' repo but with a different name, test2, to simulate a 2nd computer
git clone https://github.com/kialio/test.git test2
cd test2
pwd
git remote -v
"""
Explanation: We can now see this repository publicly on github.
Let's see how this can be useful for backup and syncing work between two different computers. I'll simulate a 2nd computer by working in a different directory...
End of explanation
"""
%%bash
cd test2 # working on computer #2
echo "More new content on my experiment" >> experiment.txt
git commit -a -m"More work, on machine #2"
"""
Explanation: Let's now make some changes in one 'computer' and synchronize them on the second.
End of explanation
"""
%%bash
cd test2
git push
"""
Explanation: Now we put this new work up on the github server so it's available from the internet
End of explanation
"""
%%bash
cd test
git pull
"""
Explanation: Now let's fetch that work from machine #1:
End of explanation
"""
%%bash
cd test
git branch trouble
git checkout trouble
echo "This is going to be a problem..." >> experiment.txt
git commit -a -m"Changes in the trouble branch"
"""
Explanation: An important aside: conflict management
While git is very good at merging, if two different branches modify the same file in the same location, it simply can't decide which change should prevail. At that point, human intervention is necessary to make the decision. Git will help you by marking the location in the file that has a problem, but it's up to you to resolve the conflict. Let's see how that works by intentionally creating a conflict.
We start by creating a branch and making a change to our experiment file:
End of explanation
"""
%%bash
cd test
git checkout master
echo "More work on the master branch..." >> experiment.txt
git commit -a -m"Mainline work"
"""
Explanation: And now we go back to the master branch, where we change the same file:
End of explanation
"""
%%bash
cd test
git merge trouble
"""
Explanation: So now let's see what happens if we try to merge the trouble branch into master:
End of explanation
"""
%%bash
cd test
cat experiment.txt
"""
Explanation: Let's see what git has put into our file:
End of explanation
"""
%%bash
cd test
cat experiment.txt
"""
Explanation: At this point, we go into the file with a text editor, decide which changes to keep, and make a new commit that records our decision. I've now made the edits, in this case I decided that both pieces of text were useful, but integrated them with some changes:
End of explanation
"""
%%bash
cd test
git commit -a -m"Completed merge of trouble, fixing conflicts along the way"
git slog
"""
Explanation: Let's then make our new commit:
End of explanation
"""
|
ninadhw/ninadhw.github.io
|
notebooks/getting_started_with_keras.ipynb
|
cc0-1.0
|
#
# Import required packages
#
from keras.models import Sequential
from keras.layers import Dense, Activation
from IPython.display import display, Image
import matplotlib.pyplot as plt
%matplotlib inline
import random
"""
Explanation: Getting started with keras
This tutorial is inspired by https://keras.io
Sequential model
Keras uses a slightly different approach for initializing and defining layers, called the Sequential model. A Sequential model is a linear stack of the layers that make up the neural network being designed. To define each and every layer in the network we use the Sequential class. This can be done in two different ways, as shown below.
End of explanation
"""
#
# Network model can be initialized using following syntax in the constructor itself
#
model1 = Sequential([
Dense(32,input_dim=784),
Activation("relu"),
Dense(10),
Activation("softmax")
])
"""
Explanation: Either define the entire neural network inside the constructor of the Sequential class, as below,
End of explanation
"""
#
# Layers to the network can be added dynamically
#
model2 = Sequential()
model2.add(Dense(32, input_dim=784))
model2.add(Activation('relu'))
model2.add(Dense(10))
model2.add(Activation('softmax'))
"""
Explanation: Or add layers to the network one by one, as convenient.
End of explanation
"""
model1 = Sequential()
model1.add(Dense(32, input_shape=(784,)))
model2 = Sequential()
model2.add(Dense(32, batch_input_shape=(None, 784)))
# note that batch dimension is "None" here,
# so the model will be able to process batches of any size with each input of length 784.
model3 = Sequential()
model3.add(Dense(32, input_dim=784))
"""
Explanation: The model needs to know what input shape it should expect, i.e. whether the input is a 28x28 image (784 pixels), some numeric text, or features of some other size.
For this reason, the first layer in a <span style="color:red;font-weight:bold">Sequential model</span> (and only the first, because following layers can do automatic shape inference from the shape of previous layers) needs to receive information about its input shape; hence the first <span style="color:red;font-weight:bold">model.add</span> call has the extra argument <span style="color:red;font-weight:bold">input_dim</span>.
There are several possible ways to do this:
-- pass an <span style="color:red;font-weight:bold">input_shape</span> argument to the first layer. This is a shape tuple (a tuple of integers or None entries, where None indicates that any positive integer may be expected). In <span style="color:red;font-weight:bold">input_shape</span>, the batch dimension is not included.
e.g. input_shape=(784,10) -> the network expects 10 inputs of length 784 each
input_shape=(784,) or input_shape=(784,None) -> the network accepts any positive number of inputs of length 784 each
-- pass instead a batch_input_shape argument, where the batch dimension is included. This is useful for specifying a fixed batch size (e.g. with stateful RNNs).
-- some 2D layers, such as Dense, support the specification of their input shape via the argument input_dim, and some 3D temporal layers support the arguments input_dim and input_length.
As such, the following three snippets are strictly equivalent:
End of explanation
"""
Image("keras_examples/keras_merge.png")
from keras.layers import Merge
left_branch = Sequential()
left_branch.add(Dense(32, input_dim=784))
right_branch = Sequential()
right_branch.add(Dense(32, input_dim=784))
merged = Merge([left_branch, right_branch], mode='concat')
final_model = Sequential()
final_model.add(merged)
final_model.add(Dense(10, activation='softmax'))
"""
Explanation: Note that <span style="font-weight:bold">input_dim=784 is the same as input_shape=(784,)</span>
The Merge layer
Multiple Sequential instances can be merged into a single output via a Merge layer. The output is a layer that can be added as the first layer in a new Sequential model. For instance, here's a model with two separate input branches getting merged:
End of explanation
"""
final_model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
final_model.fit([input_data_1, input_data_2], targets) # we pass one data array per model input
"""
Explanation: Such a two-branch model can then be trained via e.g.:
End of explanation
"""
merged = Merge([left_branch, right_branch], mode=lambda x: x[0] - x[1])
"""
Explanation: The Merge layer supports a number of pre-defined modes:
<ul>
<li>sum (default): element-wise sum</li>
<li>concat: tensor concatenation. You can specify the concatenation axis via the argument concat_axis.</li>
<li>mul: element-wise multiplication</li>
<li>ave: tensor average</li>
<li>dot: dot product. You can specify which axes to reduce along via the argument dot_axes.</li>
<li>cos: cosine proximity between vectors in 2D tensors.</li>
</ul>
You can also pass a function as the mode argument, allowing for arbitrary transformations:
End of explanation
"""
# for a multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# for a binary classification problem
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# for a mean squared error regression problem
model.compile(optimizer='rmsprop',
loss='mse')
# for custom metrics
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
def false_rates(y_true, y_pred):
false_neg = ...
false_pos = ...
return {
'false_neg': false_neg,
'false_pos': false_pos,
}
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred, false_rates])
"""
Explanation: Now you know enough to be able to define almost any model with Keras. For complex models that cannot be expressed via Sequential and Merge, you can use the functional API.
Compilation
Before training a model, you need to configure the learning process, which is done via the compile method. It receives three arguments:
<ul>
<li>an optimizer: the type of optimizer to be used, e.g. gradient descent. This could be the string identifier of an existing optimizer (such as rmsprop or adagrad), or an instance of the Optimizer class. <a href="https://keras.io/optimizers" target="_blank">See: optimizers.</a> </li>
<li>a loss function: the error function to be optimized, e.g. a squared-error or cross-entropy function. This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as categorical_crossentropy or mse), or it can be an objective function. <a href="https://keras.io/objectives" target="_blank">See: objectives.</a></li>
<li>a list of metrics, to evaluate the performance of the network. For any classification problem you will want to set this to metrics=['accuracy']. A metric could be the string identifier of an existing metric or a custom metric function. Custom metric functions should return either a single tensor value or a dict metric_name -> metric_value. <a href="https://keras.io/metrics" target="_blank">See: metrics.</a></li>
</ul>
End of explanation
"""
# for a single-input model with 2 classes (binary):
model = Sequential()
model.add(Dense(1, input_dim=784, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
data = np.random.random((1000, 784))
labels = np.random.randint(2, size=(1000, 1))
# train the model, iterating on the data in batches
# of 32 samples
model.fit(data, labels, nb_epoch=10, batch_size=32)
# for a multi-input model with 10 classes:
left_branch = Sequential()
left_branch.add(Dense(32, input_dim=784))
right_branch = Sequential()
right_branch.add(Dense(32, input_dim=784))
merged = Merge([left_branch, right_branch], mode='concat')
model = Sequential()
model.add(merged)
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# generate dummy data
import numpy as np
from keras.utils.np_utils import to_categorical
data_1 = np.random.random((1000, 784))
data_2 = np.random.random((1000, 784))
# these are integers between 0 and 9
labels = np.random.randint(10, size=(1000, 1))
# we convert the labels to a binary matrix of size (1000, 10)
# for use with categorical_crossentropy
labels = to_categorical(labels, 10)
# train the model
# note that we are passing a list of Numpy arrays as training data
# since the model has 2 inputs
model.fit([data_1, data_2], labels, nb_epoch=10, batch_size=32)
"""
Explanation: Training
Keras models are trained on Numpy arrays of input data and labels. For training a model, you will typically use the fit function. <a href="https://keras.io/models/sequential" target="_blank">Read its documentation here.</a>
End of explanation
"""
# %load mnist_mlp.py
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.utils import np_utils
"""
Explanation: Example
The following is an example implementation of a multi-layer perceptron on the MNIST dataset.
First initialize all the required libraries
End of explanation
"""
def show_prediction_results(X_test,predicted_labels):
for i,j in enumerate(random.sample(range(len(X_test)),10)):
plt.subplot(5,2,i+1)
plt.axis("off")
plt.title("Predicted labels is "+str(np.argmax(predicted_labels[j])))
plt.imshow(X_test[j].reshape(28,28))
"""
Explanation: A simple function to display test data together with the predicted labels on the test dataset
End of explanation
"""
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
"""
Explanation: Generate and structure the dataset for training and testing. We will be using 28x28 images from the MNIST dataset: about 60000 for training and 10000 for testing. We will use a batch size of 128 to classify the 10 digits in the images. To keep the computation small, 20 epochs are used; this can be increased for more accuracy.
End of explanation
"""
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
"""
Explanation: Start building the Sequential model in Keras. We will use a 3-layer MLP to model the dataset.
End of explanation
"""
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
"""
Explanation: Compiling the model configures the learning process with parameters such as the loss function, metrics and optimizer
End of explanation
"""
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
# Let's save the model in local file to fetch at later point in time to skip computations
# and directly start testing if need be
model.save_weights('mnist_mlp.hdf5')
with open('mnist_mlp.json', 'w') as f:
f.write(model.to_json())
"""
Explanation: The <span style="color:red;font-weight:bold">fit function of the model</span> fits the training data to the neural network configured before
End of explanation
"""
predicted_labels = model.predict(X_test,verbose=0)
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# Let's visualize some results randomly picked from testdata set and predicted labels for them
#
show_prediction_results(X_test,predicted_labels)
"""
Explanation: The <span style="color:red;font-weight:bold">predict function of the model</span> predicts labels or values for the test data provided
End of explanation
"""
|
anshbansal/anshbansal.github.io
|
udacity_data_science_notes/intro_data_analysis/lesson_02/Lesson2.ipynb
|
mit
|
import pandas as pd
"""
Explanation: Lesson 2: NumPy and Pandas for 1D Data
01 - Introduction
Will get familiar with 2 libraries - numpy and pandas
Writing Data Analysis code will be much easier.
Code runs faster
Analyse one dimensional data
02 - Gapminder Data
The data in this lesson was obtained from the site gapminder.org. The variables included are:
- Aged 15+ Employment Rate (%)
- Life Expectancy (years)
- GDP/capita (US$, inflation adjusted)
- Primary school completion (% of boys)
- Primary school completion (% of girls)
04 - One-Dimensional Data in NumPy and Pandas
End of explanation
"""
import numpy as np
"""
Explanation: Importing it takes some time
Has many functions like read_csv and uniq that help a lot
End of explanation
"""
employments = pd.read_csv('employment_above_15.csv')
employments[0:5]
#Selecting a column and displaying its first 5 elements
employments.get('1991')[0:5]
employments.get('Country')[0:5]
def max_employment(countries, employment):
i = employment.argmax()
return (countries[i], employment[i])
max_employment(employments.get('Country'), employments.get('2007'))
"""
Explanation: 05 - NumPy Arrays
Both Pandas and NumPy have special data structures for 1 D data
Numpy array is similar to Python list
Similarities
Access element by index
Access a range of elements
Use loops
Differences
Each element should have the same type
They can hold different types, but numpy arrays were designed for a single data type
Convenient functions like mean and std
End of explanation
"""
countries = np.array(['Afghanistan','Albania','Algeria','Angola','Argentina','Armenia'])
employment = np.array([56.700001, 52.700001, 39.400002, 75.800003, 53.599998])
print countries.dtype
print employment.dtype
print np.array([0, 1, 2, 3]).dtype
print np.array([True, False, True]).dtype
print np.array(['AL', 'AK']).dtype
"""
Explanation: Let's look at the element type of few array which numpy calls dtype
End of explanation
"""
print employment.mean()
print employment.std()
print employment.max()
print employment.sum()
"""
Explanation: |S11 means String with maximum length 11.
End of explanation
"""
np.array([1, 2, 3]) + np.array([4, 5, 6])
"""
Explanation: 07 - Vectorized Operations
Numpy supports Vectorized operations
A vector is a list of numbers
Addition of 2 vectors can be done in several ways. Different languages implement it differently
In case of NumPy it is an element wise addition
End of explanation
"""
np.array([1, 2, 3]) * 3
"""
Explanation: 09 - Multiplying by a Scalar
Multiplying by Scalar is scalar multiplied with each element of the array
End of explanation
"""
np.array([1, 2, 3]) + np.array([4, 5, 6])
np.array([1, 2, 3]) + 1
np.array([1, 2, 3]) - np.array([7, 10, 15])
np.array([1, 2, 3]) - 1
np.array([1, 2, 3]) * np.array([4, 5, 6])
np.array([1, 2, 3]) * np.array([2])
#Throws error
#np.array([1, 2, 3]) * np.array([2, 3])
np.array([2, 3]) ** np.array([2, 3])
np.array([5, 6]) ** 2
"""
Explanation: 11 - Calculate Overall Completion Rate
More vectorized operations
End of explanation
"""
female_completion = pd.read_csv('female_completion_rate.csv')
male_completion = pd.read_csv('male_completion_rate.csv')
female_completion[0:5]
male_completion[0:5]
female = np.array([56.0, 23.0, 65.0])
male = np.array([23.0, 45.0, 22.0])
def overall_completion_rate(female_completion, male_completion):
return (female_completion + male_completion) / 2
overall_completion_rate(female, male)
"""
Explanation: See this article for more information about bitwise operations.
In NumPy, a & b performs a bitwise and of a and b. This is not necessarily the same as a logical and, if you wanted to see if matching terms in two integer vectors were non-zero. However, if a and b are both arrays of booleans, rather than integers, bitwise and and logical and are the same thing. If you want to perform a logical and on integer vectors, then you can use the NumPy function np.logical_and(a, b) or convert them into boolean vectors first.
Similarly, a | b performs a bitwise or, and ~a performs a bitwise not. However, if your arrays contain booleans, these will be the same as performing logical or and logical not. NumPy also has similar functions for performing these logical operations on integer-valued arrays.
In the solution, we may want to divide by (2.) instead of just (2). This is because in Python 2, dividing an integer by another integer (2) drops the fractional part, so if our inputs are also integers, we may end up losing information. If we divide by a float (2.) then we will definitely retain decimal values.
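As a small added illustration (not part of the original lesson), compare the two on integer and boolean arrays:
```
a = np.array([1, 0, 2])
b = np.array([3, 4, 0])
print a & b                 # bitwise and on integers: [1 0 0]
print np.logical_and(a, b)  # logical and treats non-zero as True: [ True False False]
print (a > 0) & (b > 0)     # on boolean arrays, & acts as a logical and
```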
End of explanation
"""
def standardize_data(values):
return (values - values.mean()) / values.std()
"""
Explanation: 13 - Standardizing Data
How does one data point compare to other data point?
One way to do this is to convert the data point to number of standard deviations from the mean
End of explanation
"""
def mean_time_for_paid_students(time_spent, days_to_cancel):
return time_spent[days_to_cancel >= 7].mean()
"""
Explanation: 15 - NumPy Index Arrays
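As a small added illustration (not from the lesson itself), an index array is an array of booleans, or a boolean condition, used to select elements:
```
a = np.array([1, 2, 3, 4, 5])
print a[np.array([False, False, True, True, True])]  # [3 4 5]
print a[a > 2]                                        # [3 4 5]
```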
End of explanation
"""
a = np.array([1, 2, 3, 4])
b = a
a += np.array([1, 1, 1, 1]) #Difference here
print b
"""
Explanation: 17 - + vs +=
End of explanation
"""
a = np.array([1, 2, 3, 4])
b = a
a = a + np.array([1, 1, 1, 1]) #Difference here
print b
"""
Explanation:
End of explanation
"""
a = np.array([1, 2, 3, 4, 5])
slice = a[:3]
slice[0] = 100
a
"""
Explanation: 19 - In-Place vs Not In-Place
+= operates in-place while + does not
End of explanation
"""
def variable_correlation(variable1, variable2):
both_above = (variable1 > variable1.mean()) & \
(variable2 > variable2.mean())
both_below = (variable1 < variable1.mean()) & \
(variable2 < variable2.mean())
is_same_direction = both_above | both_below
num_same_direction = is_same_direction.sum()
num_different_direction = len(variable1) - num_same_direction
return (num_same_direction, num_different_direction)
"""
Explanation: slice refers to view of original array
21 - Pandas Series
End of explanation
"""
s = pd.Series([1, 2, 3, 4])
s.describe()
countries = np.array(['Albania', 'Algeria', 'Andorra', 'Angola'])
life_expectancy = np.array([74.7, 75., 83.4, 57.6])
life_expectancy
"""
Explanation: 23 - Series Indexes
End of explanation
"""
life_expectancy = pd.Series([74.7, 75., 83.4, 57.6],
index = ['Albania',
'Algeria',
'Andorra',
'Angola'])
life_expectancy
"""
Explanation: Some people call countries[0] indexing into the array. But the instructor uses position 0 to avoid confusion. This is because in Pandas, index and position are not the same thing
End of explanation
"""
#Access by index
life_expectancy.loc['Angola']
#If we don't specify index then automatically adds index 0, 1, 2, ...
pd.Series([74.7, 75., 83.4, 57.6])
#Access element by position
print life_expectancy.iloc[0]
#same as
print life_expectancy[0]
def max_employment(employment):
max_country = employment.argmax()
max_value = employment.loc[max_country]
return (max_country, max_value)
"""
Explanation: NumPy arrays are a souped-up version of Python lists
Pandas Series is like a cross between a list and a dictionary
End of explanation
"""
s1 = pd.Series([1, 2, 3, 4], index = ['a', 'b', 'c', 'd'])
s2 = pd.Series([10, 20, 30, 40], index = ['a', 'b', 'c', 'd'])
s1
s2
s1 + s2
# Index are in different order
s3 = pd.Series([10, 20, 30, 40], index = ['b', 'd', 'a', 'c'])
s3
s1 + s3
"""
Explanation: 25 - Vectorized Operations and Series Indexes
In NumPy arrays addition happens as per position
What happens if we add two Pandas series?
End of explanation
"""
s4 = pd.Series([10, 20, 30, 40], index = ['c', 'd', 'e', 'f'])
s4
s1 + s4
#If we don't want to show NaN in our solution
(s1 + s4).dropna()
"""
Explanation: Matching indexes were used to add the 2 series
End of explanation
"""
#If we want to give a default value
s1.add(s4, fill_value=0)
"""
Explanation: 28 - Filling Missing Values - Solution
End of explanation
"""
names = pd.Series([
'Andre Agassi',
'Barry Bonds',
'Christopher Columbus',
'Daniel Defoe'
])
def reverse_name(name):
split_name = name.split(" ")
return "{}, {}".format(split_name[1], split_name[0])
reverse_name(names.iloc[0])
def reverse_names(names):
return names.apply(reverse_name)
reverse_names(names)
"""
Explanation: 29 - Pandas Series apply
So far we have used built-in functions like mean() and vectorized operations like +
apply takes a Series and a function and returns a new series applying the function on each element of the Series
End of explanation
"""
employment = pd.read_csv('employment_above_15.csv', index_col = 'Country')
female_completion = pd.read_csv('female_completion_rate.csv', index_col = 'Country')
male_completion = pd.read_csv('male_completion_rate.csv', index_col = 'Country')
life_expectancy = pd.read_csv('life_expectancy.csv', index_col = 'Country')
gdp_per_capita = pd.read_csv('gdp_per_capita.csv', index_col = 'Country')
_country = 'United States'
employment_country = employment.loc[_country]
female_completion_country = female_completion.loc[_country]
male_completion_country = male_completion.loc[_country]
life_expectancy_country = life_expectancy.loc[_country]
gdp_per_capita_country = gdp_per_capita.loc[_country]
%pylab inline
employment_country.plot()
female_completion_country.plot()
male_completion_country.plot()
life_expectancy_country.plot()
gdp_per_capita_country.plot()
"""
Explanation: 31 - Plotting in Pandas - Solution
End of explanation
"""
|
AEW2015/PYNQ_PR_Overlay
|
Pynq-Z1/notebooks/examples/opencv_face_detect_webcam.ipynb
|
bsd-3-clause
|
from pynq import Overlay
Overlay("base.bit").download()
"""
Explanation: OpenCV Face Detection Webcam
In this notebook, opencv face detection will be applied to webcam images.
To run all cells in this notebook a webcam and HDMI output monitor are required.
References:
https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
Step 1: Load the overlay
End of explanation
"""
# monitor configuration: 640*480 @ 60Hz
from pynq.drivers.video import HDMI
hdmi_out = HDMI('out', video_mode=HDMI.VMODE_640x480)
hdmi_out.start()
# monitor (output) frame buffer size
frame_out_w = 1920
frame_out_h = 1080
# camera (input) configuration
frame_in_w = 640
frame_in_h = 480
# initialize camera from OpenCV
from pynq.drivers.video import Frame
import cv2
videoIn = cv2.VideoCapture(0)
videoIn.set(cv2.CAP_PROP_FRAME_WIDTH, frame_in_w);
videoIn.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_in_h);
print("capture device is open: " + str(videoIn.isOpened()))
"""
Explanation: Step 2: Initialize Webcam and HDMI Out
End of explanation
"""
# Capture webcam image
import numpy as np
ret, frame_vga = videoIn.read()
# Display webcam image via HDMI Out
if (ret):
frame_1080p = np.zeros((1080,1920,3)).astype(np.uint8)
frame_1080p[0:480,0:640,:] = frame_vga[0:480,0:640,:]
hdmi_out.frame_raw(bytearray(frame_1080p.astype(np.int8)))
else:
print("Error while reading from camera")
"""
Explanation: Step 3: Show input frame on HDMI output
End of explanation
"""
# Output webcam image as JPEG
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
plt.imshow(frame_vga[:,:,[2,1,0]])
plt.show()
"""
Explanation: Step 4: Now use matplotlib to show image inside notebook
End of explanation
"""
import cv2
np_frame = frame_vga
face_cascade = cv2.CascadeClassifier(
'./data/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(
'./data/haarcascade_eye.xml')
gray = cv2.cvtColor(np_frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(np_frame,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = np_frame[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for (ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
"""
Explanation: Step 5: Apply the face detection to the input
End of explanation
"""
# Output OpenCV results via HDMI
frame_1080p[0:480,0:640,:] = frame_vga[0:480,0:640,:]
hdmi_out.frame_raw(bytearray(frame_1080p.astype(np.int8)))
"""
Explanation: Step 6: Show results on HDMI output
End of explanation
"""
# Output OpenCV results via matplotlib
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
plt.imshow(np_frame[:,:,[2,1,0]])
plt.show()
"""
Explanation: Step 7: Now use matplotlib to show image inside notebook
End of explanation
"""
videoIn.release()
hdmi_out.stop()
del hdmi_out
"""
Explanation: Step 8: Release camera and HDMI
End of explanation
"""
|
damienstanton/tensorflownotes
|
3_regularization.ipynb
|
mit
|
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
"""
Explanation: Deep Learning
Assignment 3
Previously in 2_fullyconnected.ipynb, you trained a logistic regression and a neural network model.
The goal of this assignment is to explore regularization techniques.
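One technique you will likely explore is L2 regularization; as a rough sketch (assuming hypothetical tensors weights, logits and tf_train_labels that you would define in your own graph, not part of the starter code below):
```
beta = 1e-3  # assumed regularization strength
data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# the L2 penalty on the weights is simply added to the data loss
loss = data_loss + beta * tf.nn.l2_loss(weights)
```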
End of explanation
"""
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
"""
Explanation: First reload the data we generated in notmnist.ipynb.
End of explanation
"""
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
"""
Explanation: Reformat into a shape that's more adapted to the models we're going to train:
- data as a flat matrix,
- labels as float 1-hot encodings.
End of explanation
"""
|
phobson/statsmodels
|
examples/notebooks/generic_mle.ipynb
|
bsd-3-clause
|
from __future__ import print_function
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
"""
Explanation: Maximum Likelihood Estimation (Generic models)
This tutorial explains how to quickly implement new maximum likelihood models in statsmodels. We give two examples:
Probit model for binary dependent variables
Negative binomial model for count data
The GenericLikelihoodModel class eases the process by providing tools such as automatic numeric differentiation and a unified interface to scipy optimization functions. Using statsmodels, users can fit new MLE models simply by "plugging-in" a log-likelihood function.
Example 1: Probit model
End of explanation
"""
data = sm.datasets.spector.load_pandas()
exog = data.exog
endog = data.endog
print(sm.datasets.spector.NOTE)
print(data.exog.head())
"""
Explanation: The Spector dataset is distributed with statsmodels. You can access a vector of values for the dependent variable (endog) and a matrix of regressors (exog) like this:
End of explanation
"""
exog = sm.add_constant(exog, prepend=True)
"""
Explanation: Then, we add a constant to the matrix of regressors:
End of explanation
"""
class MyProbit(GenericLikelihoodModel):
def loglike(self, params):
exog = self.exog
endog = self.endog
q = 2 * endog - 1
return stats.norm.logcdf(q*np.dot(exog, params)).sum()
"""
Explanation: To create your own Likelihood Model, you simply need to overwrite the loglike method.
End of explanation
"""
sm_probit_manual = MyProbit(endog, exog).fit()
print(sm_probit_manual.summary())
"""
Explanation: Estimate the model and print a summary:
End of explanation
"""
sm_probit_canned = sm.Probit(endog, exog).fit()
print(sm_probit_canned.params)
print(sm_probit_manual.params)
print(sm_probit_canned.cov_params())
print(sm_probit_manual.cov_params())
"""
Explanation: Compare your Probit implementation to statsmodels' "canned" implementation:
End of explanation
"""
import numpy as np
from scipy.stats import nbinom
def _ll_nb2(y, X, beta, alph):
mu = np.exp(np.dot(X, beta))
size = 1/alph
prob = size/(size+mu)
ll = nbinom.logpmf(y, size, prob)
return ll
"""
Explanation: Notice that the GenericLikelihoodModel class provides automatic differentiation, so we didn't have to provide Hessian or score functions in order to calculate the covariance estimates.
Example 2: Negative Binomial Regression for Count Data
Consider a negative binomial regression model for count data with
log-likelihood (type NB-2) function expressed as:
$$
\mathcal{L}(\beta_j; y, \alpha) = \sum_{i=1}^n y_i \ln
\left( \frac{\alpha \exp(X_i'\beta)}{1+\alpha \exp(X_i'\beta)} \right) -
\frac{1}{\alpha} \ln(1+\alpha \exp(X_i'\beta)) + \ln \Gamma (y_i + 1/\alpha) - \ln \Gamma (y_i+1) - \ln \Gamma (1/\alpha)
$$
with a matrix of regressors $X$, a vector of coefficients $\beta$,
and the negative binomial heterogeneity parameter $\alpha$.
Using the nbinom distribution from scipy, we can write this likelihood
simply as:
End of explanation
"""
from statsmodels.base.model import GenericLikelihoodModel
class NBin(GenericLikelihoodModel):
def __init__(self, endog, exog, **kwds):
super(NBin, self).__init__(endog, exog, **kwds)
def nloglikeobs(self, params):
alph = params[-1]
beta = params[:-1]
ll = _ll_nb2(self.endog, self.exog, beta, alph)
return -ll
def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
# we have one additional parameter and we need to add it for summary
self.exog_names.append('alpha')
if start_params is None:
# Reasonable starting values
start_params = np.append(np.zeros(self.exog.shape[1]), .5)
# intercept
start_params[-2] = np.log(self.endog.mean())
return super(NBin, self).fit(start_params=start_params,
maxiter=maxiter, maxfun=maxfun,
**kwds)
"""
Explanation: New Model Class
We create a new model class which inherits from GenericLikelihoodModel:
End of explanation
"""
import statsmodels.api as sm
medpar = sm.datasets.get_rdataset("medpar", "COUNT", cache=True).data
medpar.head()
"""
Explanation: Two important things to notice:
nloglikeobs: This function should return one evaluation of the negative log-likelihood function per observation in your dataset (i.e. rows of the endog/X matrix).
start_params: A one-dimensional array of starting values needs to be provided. The size of this array determines the number of parameters that will be used in optimization.
That's it! You're done!
Usage Example
The Medpar
dataset is hosted in CSV format at the Rdatasets repository. We use the read_csv
function from the Pandas library to load the data
in memory. We then print the first few columns:
End of explanation
"""
y = medpar.los
X = medpar[["type2", "type3", "hmo", "white"]]
X["constant"] = 1
"""
Explanation: The model we are interested in has a vector of non-negative integers as
dependent variable (los), and 5 regressors: Intercept, type2,
type3, hmo, white.
For estimation, we need to create two variables to hold our regressors and the outcome variable. These can be ndarrays or pandas objects.
End of explanation
"""
mod = NBin(y, X)
res = mod.fit()
"""
Explanation: Then, we fit the model and extract some information:
End of explanation
"""
print('Parameters: ', res.params)
print('Standard errors: ', res.bse)
print('P-values: ', res.pvalues)
print('AIC: ', res.aic)
"""
Explanation: Extract parameter estimates, standard errors, p-values, AIC, etc.:
End of explanation
"""
print(res.summary())
"""
Explanation: As usual, you can obtain a full list of available information by typing
dir(res).
We can also look at the summary of the estimation results.
End of explanation
"""
res_nbin = sm.NegativeBinomial(y, X).fit(disp=0)
print(res_nbin.summary())
print(res_nbin.params)
print(res_nbin.bse)
"""
Explanation: Testing
We can check the results by using the statsmodels implementation of the Negative Binomial model, which uses the analytic score function and Hessian.
End of explanation
"""
|
shareactorIO/pipeline
|
oreilly.ml/high-performance-tensorflow/notebooks/04_Train_Model_GPU.ipynb
|
apache-2.0
|
import tensorflow as tf
from tensorflow.python.client import timeline
import pylab
import numpy as np
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
tf.logging.set_verbosity(tf.logging.INFO)
tf.reset_default_graph()
num_samples = 100000
from datetime import datetime
version = int(datetime.now().strftime("%s"))
x_train = np.random.rand(num_samples).astype(np.float32)
print(x_train)
noise = np.random.normal(scale=0.01, size=len(x_train))
y_train = x_train * 0.1 + 0.3 + noise
print(y_train)
pylab.plot(x_train, y_train, '.')
x_test = np.random.rand(len(x_train)).astype(np.float32)
print(x_test)
noise = np.random.normal(scale=.01, size=len(x_train))
y_test = x_test * 0.1 + 0.3 + noise
print(y_test)
pylab.plot(x_train, y_train, '.')
with tf.device("/cpu:0"):
W = tf.get_variable(shape=[], name='weights')
print(W)
b = tf.get_variable(shape=[], name='bias')
print(b)
x_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='x_observed')
print(x_observed)
with tf.device("/gpu:0"):
y_pred = W * x_observed + b
print(y_pred)
with tf.device("/gpu:0"):
y_observed = tf.placeholder(shape=[None], dtype=tf.float32, name='y_observed')
print(y_observed)
loss_op = tf.reduce_mean(tf.square(y_pred - y_observed))
optimizer_op = tf.train.GradientDescentOptimizer(0.025)
train_op = optimizer_op.minimize(loss_op)
print("loss:", loss_op)
print("optimizer:", optimizer_op)
print("train:", train_op)
with tf.device("/cpu:0"):
init_op = tf.global_variables_initializer()
print(init_op)
train_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/gpu/%s/train' % version, graph=tf.get_default_graph())
test_summary_writer = tf.summary.FileWriter('/root/tensorboard/linear/gpu/%s/test' % version, graph=tf.get_default_graph())
config = tf.ConfigProto(
log_device_placement=True,
)
config.gpu_options.allow_growth=True
print(config)
sess = tf.Session(config=config)
print(sess)
sess.run(init_op)
print(sess.run(W))
print(sess.run(b))
"""
Explanation: Train Model with GPU (and CPU*)
End of explanation
"""
def test(x, y):
return sess.run(loss_op, feed_dict={x_observed: x, y_observed: y})
test(x=x_test, y=y_test)
loss_summary_scalar_op = tf.summary.scalar('loss', loss_op)
loss_summary_merge_all_op = tf.summary.merge_all()
"""
Explanation: Look at the Model Graph In Tensorboard
http://[ip-address]:6006
Accuracy of Random Weights
End of explanation
"""
%%time
max_steps = 400
run_metadata = tf.RunMetadata()
for step in range(max_steps):
if (step < max_steps):
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train})
else:
test_summary_log, _ = sess.run([loss_summary_merge_all_op, loss_op], feed_dict={x_observed: x_test, y_observed: y_test})
train_summary_log, _ = sess.run([loss_summary_merge_all_op, train_op], feed_dict={x_observed: x_train, y_observed: y_train}, options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE), run_metadata=run_metadata)
trace = timeline.Timeline(step_stats=run_metadata.step_stats)
with open('gpu-timeline.json', 'w') as trace_file:
trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
if step % 5 == 0:
print(step, sess.run([W, b]))
train_summary_writer.add_summary(train_summary_log, step)
train_summary_writer.flush()
test_summary_writer.add_summary(test_summary_log, step)
test_summary_writer.flush()
pylab.plot(x_train, y_train, '.', label="target")
pylab.plot(x_train, sess.run(y_pred, feed_dict={x_observed: x_train, y_observed: y_train}), ".", label="predicted")
pylab.legend()
pylab.ylim(0, 1.0)
test(x=x_test, y=y_test)
"""
Explanation: Train Model
End of explanation
"""
from tensorflow.python.saved_model import utils
tensor_info_x_observed = utils.build_tensor_info(x_observed)
print(tensor_info_x_observed)
tensor_info_y_pred = utils.build_tensor_info(y_pred)
print(tensor_info_y_pred)
export_path = "/root/models/linear/gpu/%s" % version
print(export_path)
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
with tf.device("/cpu:0"):
builder = saved_model_builder.SavedModelBuilder(export_path)
prediction_signature = signature_def_utils.build_signature_def(
inputs = {'x_observed': tensor_info_x_observed},
outputs = {'y_pred': tensor_info_y_pred},
method_name = signature_constants.PREDICT_METHOD_NAME)
legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
builder.add_meta_graph_and_variables(sess,
[tag_constants.SERVING],
signature_def_map={'predict':prediction_signature,
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:prediction_signature},
legacy_init_op=legacy_init_op)
builder.save()
"""
Explanation: Look at the Train and Test Loss Summary In Tensorboard
Navigate to the Scalars tab at this URL:
http://[ip-address]:6006
End of explanation
"""
%%bash
ls -l /root/models/linear/gpu/[version]
"""
Explanation: Look at the Model On Disk
You must replace [version] with the version number
End of explanation
"""
from tensorflow.python.framework import graph_io
graph_io.write_graph(sess.graph, "/root/models/optimize_me/", "unoptimized_gpu.pb")
"""
Explanation: HACK: Save Model in Previous Format
We will use this later.
End of explanation
"""
|
bartdevylder/bikecity-tutorial
|
index.ipynb
|
mit
|
print 'Hello world!'
print range(5)
"""
Explanation: Python for Data Science Workshop @VeloCity
1.1 Jupyter Notebook
Jupyter notebook is often used by data scientists who work in Python. It is loosely based on Mathematica and combines code, text and visual output in one page.
Some relevant shortcuts:
* SHIFT + ENTER executes 1 block of code called a cell
* Tab-completion is omnipresent after the import of a package has been executed
* SHIFT + TAB gives you extra information on what parameters a function takes
* Repeating SHIFT + TAB multiple times gives you even more information
To get used to these shortcuts, try them out on the cell below.
End of explanation
"""
import numpy as np
# This is a two-dimensional numpy array
arr = np.array([[1,2,3,4],[5,6,7,8]])
print arr
# The shape is a tuple describing the size of each dimension
print "shape=" + str(arr.shape)
# The numpy reshape method allows one to change the shape of an array, while keeping the underlying data.
# One can leave one dimension unspecified by passing -1, it will be determined from the size of the data.
print "As 4x2 matrix"
print np.reshape(arr, (4,2))
print
print "As 8x1 matrix"
print np.reshape(arr, (-1,1))
print
print "As 2x2x2 array"
print np.reshape(arr, (2,2,-1))
"""
Explanation: 1.2 Numpy arrays
We'll be working often with numpy arrays so here's a very short introduction.
End of explanation
"""
x = np.array([1.,2.,3.])
y = np.array([4.,5.,6.])
print x + y
print x - y
print x * y
print x / y
"""
Explanation: Basic arithmetical operations on arrays of the same shape are done elementwise:
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# three = ?
assert three == 3
"""
Explanation: 1.3 Parts to be implemented
In cells like the following example you are expected to implement some code. The remainder of the tutorial won't work if you skip these.
Sometimes assertions are added as a check.
End of explanation
"""
import cPickle as pickle
past = pickle.load(open('data/past_data.pickle'))
all_data = pickle.load(open('data/all_data.pickle'))
"""
Explanation: 2. Anomaly Detection
2.1 Load Data
First we will load the data using a pickle format. (We use import cPickle as pickle because cPickle is faster.)
The data we use contains the pageviews of one of our own websites and for convenience there is only 1 data point per hour.
End of explanation
"""
% matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(20,4)) # This creates a new figure with the dimensions of 20 by 4
plt.plot(past) # This creates the actual plot
plt.show() # This shows the plot
"""
Explanation: 2.2 Plot past data
To plot the past data we will use matplotlib.pyplot. For convenience we import it as plt.
% matplotlib inline makes sure you can see the output in the notebook.
(Use % matplotlib notebook if you want to make it interactive. Don't forget to click the power button to finish the interaction and to be able to plot a new figure.)
End of explanation
"""
import numpy as np
##### Implement this part of the code #####
raise NotImplementedError()
# maximum = ?
# minimum = ?
print minimum, maximum
"""
Explanation: 2.3 Find the minimum and maximum
Use np.nanmax() and np.nanmin() to find the minimum and maximum while ignoring the NaNs.
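One possible completion of the exercise above (a sketch, not necessarily the intended solution):
```
maximum = np.nanmax(past)
minimum = np.nanmin(past)
```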
End of explanation
"""
plt.figure(figsize=(20,4))
plt.plot(past)
plt.axhline(maximum, color='r')
plt.axhline(minimum, color='r')
plt.show()
"""
Explanation: And plot these together with the data using the plt.axhline() function.
End of explanation
"""
plt.figure(figsize=(20,4))
plt.plot(all_data, color='g')
plt.plot(past, color='b')
plt.axhline(maximum, color='r')
plt.axhline(minimum, color='r')
plt.show()
"""
Explanation: 2.4 Testing the model on unseen data
Now plot all the data instead of just the past data
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# reshaped_past = ?
assert len(reshaped_past.shape) == 2
assert reshaped_past.shape[1] == 24
"""
Explanation: You can clearly see now that this model does not detect any anomalies. However, the last day of data clearly looks different compared to the other days.
In what follows we will build a better model for anomaly detection that is able to detect these 'shape shifts' as well.
2.5 Building a model with seasonality
To do this we are going to take a step by step approach. Maybe it won't be clear at first why every step is necessary, but that will become clear throughout the process.
First we are going to reshape the past data to a 2 dimensional array with 24 columns. This will give us 1 row for each day and 1 column for each hour. For this we are going to use the np.reshape() function. The newshape parameter is a tuple which in this case should be (-1, 24). If you use a -1 the reshape function will automatically compute that dimension. Pay attention to the order in which the numbers are repositioned (the default ordering should work fine here).
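A possible completion (sketch):
```
reshaped_past = np.reshape(past, (-1, 24))
```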
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# average_past =
assert average_past.shape == (24,)
plt.plot(average_past)
plt.show()
"""
Explanation: Now we are going to compute the average over all days. For this we are going to use np.mean() with the axis argument set to the first dimension (axis=0). Next we are going to plot this.
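For example (a sketch; you may need np.nanmean instead if the reshaped data still contains NaNs):
```
average_past = np.mean(reshaped_past, axis=0)
```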
End of explanation
"""
model = []
for i in range(6):
##### Implement this part of the code #####
raise NotImplementedError()
# model = np.concatenate( ? )
plt.figure(figsize=(20,4))
plt.plot(model, color='k')
plt.plot(past, color='b')
plt.show()
"""
Explanation: What you can see in the plot above is the average number of pageviews for each hour of the day.
Now let's plot this together with the past data on 1 plot. Use a for loop and the np.concatenate() function to concatenate this average 6 times into the variable model.
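A possible way to build the model variable (sketch):
```
model = []
for i in range(6):
    model = np.concatenate((model, average_past))
```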
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
print delta_min, delta_max
"""
Explanation: In the next step we are going to compute the maximum (= positive) and minimum (= negative) deviations from the average to determine what kind of deviations are normal. (Just subtract the average/model from the past and take the min and the max of that.)
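For example (a sketch using the NaN-aware variants, since the past data may contain NaNs):
```
delta_max = np.nanmax(past - model)
delta_min = np.nanmin(past - model)
```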
End of explanation
"""
plt.figure(figsize=(20,4))
plt.plot(model, color='k')
plt.plot(past, color='b')
plt.plot(model + delta_max, color='r')
plt.plot(model + delta_min, color='r')
plt.show()
"""
Explanation: Now let's plot this.
End of explanation
"""
model_all = np.concatenate((model, average_past))
plt.figure(figsize=(20,4))
plt.plot(all_data, color='g')
plt.plot(model_all, color='k')
plt.plot(past, color='b')
plt.plot(model_all + delta_max, color='r')
plt.plot(model_all + delta_min, color='r')
plt.show()
"""
Explanation: Now let's test this on all data
End of explanation
"""
anomaly_timepoints = np.where(np.logical_or(all_data < model_all + delta_min, all_data > model_all + delta_max))[0]
plt.figure(figsize=(20,4))
plt.scatter(anomaly_timepoints, all_data[anomaly_timepoints], color='r', linewidth=8)
plt.plot(all_data, color='g')
plt.plot(model_all, color='k')
plt.plot(past, color='b')
plt.plot(model_all + delta_max, color='r')
plt.plot(model_all + delta_min, color='r')
plt.xlim(0, len(all_data))
plt.show()
print 'The anomaly occurs at the following timestamps:', anomaly_timepoints
"""
Explanation: Now you can clearly see where the anomaly is detected by this more advanced model. The code below gives you the exact indices where an anomaly is detected; the functions used are np.where() and np.logical_or().
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
pylab.rcParams['figure.figsize'] = (13.0, 8.0)
%matplotlib inline
"""
Explanation: 3. Modeling
It is often desired to understand the relationship between different sources of information. As an example we'll consider the historical request rate of a web server and compare it to its CPU usage. We'll try to predict the CPU usage of the server based on the request rates of the different pages. First some imports:
End of explanation
"""
data = pd.DataFrame.from_csv("data/request_rate_vs_CPU.csv")
"""
Explanation: 3.1 Data import and inspection
Pandas is a popular library for data wrangling, we'll use it to load and inspect a csv file that contains the historical web request and cpu usage of a web server:
End of explanation
"""
data.head()
"""
Explanation: The head command allows one to quickly see the structure of the loaded data:
End of explanation
"""
data.plot(figsize=(13,8), y="CPU")
"""
Explanation: We can select the CPU column and plot the data:
End of explanation
"""
data.drop('CPU',1).plot(figsize=(13,8))
"""
Explanation: Next we plot the request rates, leaving out the CPU column as it has another unit:
End of explanation
"""
request_names = data.drop('CPU',1).columns.values
request_names
"""
Explanation: Now to continue and start to model the data, we'll work with basic numpy arrays. By doing this we also drop the time-information as shown in the plots above.
We extract the column labels as the request_names for later reference:
End of explanation
"""
request_rates = data.drop('CPU',1).values
"""
Explanation: We extract the request rates as a 2-dimensional numpy array:
End of explanation
"""
cpu = data['CPU'].values
"""
Explanation: and the cpu usage as a one-dimensional numpy array
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# total_request_rate =
assert total_request_rate.shape == (288,)
"""
Explanation: 3.2 Simple linear regression
First, we're going to work with the total request rate on the server, and compare it to the CPU usage. The numpy function np.sum can be used to calculate the total request rate when selecting the right direction (axis=1) for the summation.
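One possible completion (sketch):
```
total_request_rate = np.sum(request_rates, axis=1)
```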
End of explanation
"""
plt.figure(figsize=(13,8))
plt.plot(total_request_rate)
"""
Explanation: Let's plot the total request rate to check:
End of explanation
"""
plt.figure(figsize=(13,8))
plt.xlabel("Total request rate")
plt.ylabel("CPU usage")
##### Implement this part of the code #####
raise NotImplementedError()
# plt.scatter( ? , ? )
"""
Explanation: We can make use of PyPlot's scatter plot to understand the relation between the total request rate and the CPU usage:
End of explanation
"""
from sklearn import linear_model
simple_lin_model = linear_model.LinearRegression()
"""
Explanation: There clearly is a strong correlation between the request rate and the CPU usage. Because of this correlation we can build a model to predict the CPU usage from the total request rate. If we use a linear model we get a formula like the following:
$$ \text{cpu} = c_0 + c_1 \text{total_request_rate} $$
Since we don't know the exact values for $c_0$ and $c_1$ we will have to compute them. For that we'll make use of the scikit-learn machine learning library for Python and use least-squares linear regression
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# total_request_rate_M =
# Test to see it's a two dimensional array
assert len(total_request_rate_M.shape) == 2
# Test to see it's got only 1 column
assert total_request_rate_M.shape[1] == 1
"""
Explanation: Now we need to feed the data to the model to fit it. The model.fit(X,y) method in general takes an array X and vector y as arguments:
```
X = [[x_11, x_12, x_13, ...], y = [y_1,
[x_21, x_22, x_23, ...], y_2,
[x_31, x_32, x_33, ...], y_3,
...] ...]
```
and tries to find coefficients that allow it to predict the y_i's from the x_ij's. In our case the matrix X will consist of only 1 column containing the total request rates. Our total_request_rate variable, however, is still only a one-dimensional vector, so we need to np.reshape it into a two-dimensional array:
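For example (sketch):
```
total_request_rate_M = total_request_rate.reshape((-1, 1))
```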
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# simple_lin_model.fit( ? , ? )
"""
Explanation: Then we fit our model using the total request rate and cpu. The coefficients found are automatically stored in the simple_lin_model object.
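One possible completion (sketch):
```
simple_lin_model.fit(total_request_rate_M, cpu)
```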
End of explanation
"""
print "Coefficient = %s, constant term = %f" % (str(simple_lin_model.coef_), simple_lin_model.intercept_)
"""
Explanation: We can now inspect the coefficient $c_1$ and constant term (intercept) $c_0$ of the model:
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# simple_lin_model.predict( [[ ? ]] )
"""
Explanation: So this means that each additional request adds about 0.11% CPU load to the server and all the other processes running on the server consume on average 0.72% CPU.
Once the model is trained we can use it to predict the outcome for a given input (or array of inputs). Note that the predict function requires a 2-dimensional array similar to the fit function.
What is the expected CPU usage when we have 880 requests per second?
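One way to check this (sketch):
```
print simple_lin_model.predict([[880]])
```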
End of explanation
"""
plt.figure(figsize=(13,8))
plt.scatter(total_request_rate, cpu, color='black')
plt.plot(total_request_rate, simple_lin_model.predict(total_request_rate_M), color='blue', linewidth=3)
plt.xlabel("Total request rate")
plt.ylabel("CPU usage")
plt.show()
"""
Explanation: Now we plot the linear model together with our data to verify it captures the relationship correctly (the predict method can accept the entire total_request_rate_M array at once).
End of explanation
"""
simple_lin_model.score(total_request_rate_M, cpu)
"""
Explanation: Our model can calculate a score indicating how well the linear model captures the data. A score of 1 means the data is perfectly linear, a score of 0 (or lower) means the data is not linear at all (and it does not make sense to try to model it that way). The score method takes the same arguments as the fit method:
End of explanation
"""
print request_names
"""
Explanation: 3.3 Multiple linear regression
Now let us consider the separate request rates instead and build a linear model for that. The model we try to fit takes the form:
$$\text{cpu} = c_0 + c_1 \text{request_rate}_1 + c_2 \text{request_rate}_2 + \ldots + c_n \text{request_rate}_n$$
where the $\text{request_rate}_i$'s correspond the our different requests:
End of explanation
"""
multi_lin_model = linear_model.LinearRegression()
"""
Explanation: We start again by creating a LinearRegression model.
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# multi_lin_model.fit( ? , ? )
"""
Explanation: Next we fit the model on the data, using multi_lin_model.fit(X,y). In contrast to the case above our request_rates variable already has the correct shape to pass as the X matrix: it has one column per request type.
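One possible completion (sketch):
```
multi_lin_model.fit(request_rates, cpu)
```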
End of explanation
"""
# combine the requests and the output in a pandas data frame for easy printing
result_table = pd.DataFrame(zip(request_names, multi_lin_model.coef_), columns=['Request', 'Coef'])
# sort the results in descending order
result_table = result_table.sort_values(by='Coef',ascending=False)
# executing this as the last command returns a nice table
result_table
"""
Explanation: Now, given the coefficients calculated by the model, which capture the contribution of each request type to the total CPU usage, we can start to answer some interesting questions. For example,
which request causes the most CPU usage, on a per-visit basis?
For this we can generate a table of request names with their coefficients in descending order:
End of explanation
"""
print 'The other processes on the server consume %.2f%%' % multi_lin_model.intercept_
"""
Explanation: From this table we see that 'resources/js/basket.js' consumes the most CPU per request. It generates about 0.30% CPU load for each additional request. 'products/science.html' on the other hand is much leaner and only consumes about 0.04% CPU per request.
Now let us investigate the constant term again.
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# average_request_rates =
assert average_request_rates.shape == (6,)
results = []
# Loop over all requests
for i in range(len(request_names)):
# make a copy of the array to avoid overwriting
tweaked_load = np.copy(average_request_rates)
##### Implement this part of the code #####
raise NotImplementedError()
# tweaked_load[ ? ] = ?
# resulting_cpu = ?
results.append( (request_names[i],
multi_lin_model.coef_[i],
average_request_rates[i],
resulting_cpu))
# Now we store the results in a pandas dataframe for easier inspection.
mlin_df = pd.DataFrame(results, columns=['Diverted request', 'Coef', 'Rate', 'Predicted CPU'])
mlin_df = mlin_df.sort_values(by='Predicted CPU')
mlin_df
"""
Explanation: As you can see this term is very similar to the result achieved in single linear regression, but it is not entirely the same. This means that these models are not perfect. However, they seem to be able to give a reliable estimate.
3.4 Multiple linear regression 'advanced'
In the previous section we have modeled how much load each individual request generates. But in some cases you might want to transfer one of the requests to another server. Now, suppose we want to minimize the average CPU usage on this server by diverting the traffic of only one webpage to another server; which page should we choose?
For this we simulate diverting the traffic of one page to another server. This means that for the request that is diverted the rate becomes 0, while for the other requests we use the average rate.
We implement this by first calculating the average_request_rates using np.mean. These average_request_rates are then fed to the multi_lin_model.predict() method, but with each individual request rate set to 0 in turn.
(For linear models you can also compute the result based on the coefficients, but this approach also works for non-linear models.)
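A possible sketch of this simulation for a single diverted request (the loop in the exercise above repeats this for every request type):
```
average_request_rates = np.mean(request_rates, axis=0)
tweaked_load = np.copy(average_request_rates)
tweaked_load[0] = 0  # divert the first request type
resulting_cpu = multi_lin_model.predict([tweaked_load])[0]
```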
End of explanation
"""
train_set = pickle.load(open('data/train_set_forecasting.pickle'))
plt.figure(figsize=(20,4))
plt.plot(train_set)
plt.show()
"""
Explanation: As you can see in the table above, it is best to divert the traffic of 'api/product/get.php'. (Why is the result different from the table based on the coefficients?)
4. Forecasting
For forecasting we are going to use page-view data, very similar to the data used in the anomaly detection section. It again contains 1 sample per hour.
End of explanation
"""
import sklearn
import sklearn.linear_model
import sklearn.gaussian_process
model = sklearn.linear_model.LinearRegression()
# the input X contains all the data except the last data point
X = train_set[ : -1].reshape((-1, 1)) # the reshape is necessary since sklearn requires a 2 dimensional array
# the output y contains all the data except the first data point
y = train_set[1 : ]
# this code fits the model on the train data
model.fit(X, y)
# this score gives you how well it fits on the train set
# higher is better and 1.0 is perfect
print 'The score of the linear model is', model.score(X, y)
"""
Explanation: In the graph above you can clearly see that there is a rising trend in the data.
4.1 One-step ahead prediction
This section describes one-step-ahead prediction: we only predict the next data point, which in this case is the number of pageviews in the next hour.
Now let's first build a model that tries to predict the next data point from the previous one.
End of explanation
"""
nof_predictions = 100
import copy
# use the last data point as the first input for the predictions
x_test = copy.deepcopy(train_set[-1]) # make a copy to avoid overwriting the training data
prediction = []
for i in range(nof_predictions):
# predict the next data point
y_test = model.predict([[x_test]])[0] # sklearn requires a 2 dimensional array and returns a one-dimensional one
##### Implement this part of the code #####
raise NotImplementedError()
# prediction.append( ? )
# x_test = ?
prediction = np.array(prediction)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction)), 'g')
plt.plot(train_set, 'b')
plt.show()
"""
Explanation: As you can see from the score above, the model is not perfect but it seems to get a relatively high score. Now let's make a prediction into the future and plot this.
To predict the data point after that, we feed the predicted value back in as input for a new prediction. The code below shows how this works for this data set using the linear model you trained earlier. Don't forget to fill out the missing code.
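One possible way to fill in the two missing lines inside the loop (a sketch, not necessarily the intended solution):
```
prediction.append(y_test)
x_test = y_test
```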
End of explanation
"""
def convert_time_series_to_Xy(ts, width):
X, y = [], []
for i in range(len(ts) - width - 1):
X.append(ts[i : i + width])
y.append(ts[i + width])
return np.array(X), np.array(y)
width = 5
X, y = convert_time_series_to_Xy(train_set, width)
print X.shape, y.shape
"""
Explanation: As you can see from the image above the model doesn't quite seem to fit the data well. Let's see how we can improve this.
4.2 Multiple features
If your model is not smart enough there is a simple trick in machine learning to make your model more intelligent (but also more complex). This is by adding more features.
To make our model better we will use more than 1 sample from the past. To make your life easier there is a simple function below that will create a data set for you. The width parameter sets the number of hours in the past that will be used.
End of explanation
"""
width = 5
X, y = convert_time_series_to_Xy(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(X,y)
print 'The score of the linear model with width =', width, 'is', model.score(X, y)
"""
Explanation: As you can see from the print above, both X and y contain 303 data points. For X you see that there are now 5 features, which contain the pageviews from the 5 past hours.
So let's have a look at what increasing from 1 to 5 features results in.
End of explanation
"""
import copy
# this is a helper function to make the predictions
def predict(model, train_set, width, nof_points):
prediction = []
# create the input data set for the first predicted output
    # copy the data to make sure the original is not overwritten
x_test = copy.deepcopy(train_set[-width : ])
for i in range(nof_points):
# predict only the next data point
prediction.append(model.predict(x_test.reshape((1, -1))))
# use the newly predicted data point as input for the next prediction
x_test[0 : -1] = x_test[1 : ]
x_test[-1] = prediction[-1]
return np.array(prediction)
nof_predictions = 200
prediction = predict(model, train_set, width, nof_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
"""
Explanation: Now change the width parameter to see if you can get a better score.
4.3 Over-fitting
Now execute the code below to see the prediction of this model.
End of explanation
"""
width = 1 #find a better width
X, y = convert_time_series_to_Xy(train_set, width)
model = sklearn.linear_model.LinearRegression()
model.fit(X,y)
print 'The score of the linear model with width =', width, 'is', model.score(X, y)
prediction = predict(model, train_set, width, 200)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
"""
Explanation: As you can see in the image above, the prediction is not what you would expect from a perfect model. What happened is that the model learned the training data by heart without 'understanding' what the data is really about. This phenomenon is called over-fitting and will always occur if you make your model more complex.
Now play with the width variable below to see if you can find a more sensible width.
End of explanation
"""
model_generators = [sklearn.linear_model.LinearRegression, sklearn.linear_model.RidgeCV,
sklearn.linear_model.LassoCV, sklearn.gaussian_process.GaussianProcess]
best_score = 0
##### Implement this part of the code #####
raise NotImplementedError()
# for model_gen in ? :
# for width in range( ? , ? ):
X, y = convert_time_series_to_Xy(train_set, width)
# train the model on the first 48 hours
X_train, y_train = X[ : -48, :], y[ : -48]
# use the last 48 hours for validation
X_val, y_val = X[-48 : ], y[-48 : ]
##### Implement this part of the code #####
raise NotImplementedError()
# model =
# there is a try except clause here because some models do not converge for some data
try:
##### Implement this part of the code #####
raise NotImplementedError()
# model.fit( ? , ? )
# this_score = ?
if this_score > best_score:
best_score = this_score
best_model_gen = model_gen
best_width = width
except:
pass
print best_model_gen().__class__, 'was selected as the best model with a width of', best_width, 'and a validation score of', best_score
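# One possible completion of the search above (a sketch, not an official solution);
# the width range 1..49 is an arbitrary choice.
best_score = 0
for model_gen in model_generators:
    for width in range(1, 50):
        X, y = convert_time_series_to_Xy(train_set, width)
        X_train, y_train = X[ : -48, :], y[ : -48]
        X_val, y_val = X[-48 : ], y[-48 : ]
        model = model_gen()
        # some models do not converge for some data, hence the try/except
        try:
            model.fit(X_train, y_train)
            this_score = model.score(X_val, y_val)
            if this_score > best_score:
                best_score = this_score
                best_model_gen = model_gen
                best_width = width
        except:
            pass
print best_model_gen().__class__, 'was selected as the best model with a width of', best_width, 'and a validation score of', best_score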
"""
Explanation: As you will have noticed by now, it is better to have a non-perfect training score, which gives you a much better outcome. Now try the same thing for the following models:
* sklearn.linear_model.RidgeCV()
* sklearn.linear_model.LassoCV()
* sklearn.gaussian_process.GaussianProcess()
The first 2 models also estimate the noise that is present in the data to avoid overfitting. RidgeCV will keep the weights that are found small, but it won't put them to zero. LassoCV on the other hand will put several weights to 0. Execute model.coef_ to see the actual coefficients that have been found.
GaussianProcess is a non-linear method. This makes the method a lot more complex and therefore it needs significantly fewer features to be able to learn the data by heart (and thus to over-fit). In many cases, however, this additional complexity allows it to better understand the data. Additionally it has the advantage that it can estimate confidence intervals similar to the red lines used in the anomaly detection.
4.4 Automation
What we have done up to now is manually selecting the best outcome based on the test result. This can be considered cheating, because you have just created a self-fulfilling prophecy. It is also hard to find the exact width that gives the best result by just visually inspecting it. So we need a more objective approach.
To automate this process you can use a validation set. In this case we will use the last 48 hours of the training set to validate the score and select the best parameter value. This means that we will have to use a subset of the training set to fit the model.
End of explanation
"""
##### Implement this part of the code #####
raise NotImplementedError()
# width = ?
# model = ?
X, y = convert_time_series_to_Xy(train_set, width)
##### Implement this part of the code #####
raise NotImplementedError()
# model.fit( ? , ? )
nof_predictions = 200
prediction = predict(model, train_set, width, nof_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()
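# One possible completion of the stub above (a sketch, not an official solution):
# reuse the best width and model family found during validation and refit on all the data.
width = best_width
model = best_model_gen()
X, y = convert_time_series_to_Xy(train_set, width)
model.fit(X, y)
prediction = predict(model, train_set, width, nof_predictions)
plt.figure(figsize=(20,4))
plt.plot(np.concatenate((train_set, prediction[:,0])), 'g')
plt.plot(train_set, 'b')
plt.show()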
"""
Explanation: If everything is correct, the LassoCV method was selected.
Now we are going to train this best model on all the data. In this way we use all the available data to build a model.
End of explanation
"""
|
parklab/PaSDqc
|
examples/02_example-basic_PSD/Intro_to_PSDs.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import PaSDqc
%matplotlib inline
"""
Explanation: Introduction
In this example we load example bulk, MDA, and MALBAC power spectral densities (PSDs) generated by the command line tool provided in the PaSDqc package. We additionally compare the PaSDqc PSDs to naive PSDs generated using the algorithm of Leung et al., 2016.
End of explanation
"""
sample_mda = PaSDqc.PSDTools.SamplePSD.load_from_file("../data/intro_PSDs/example_MDA.spec", name='MDA')
sample_malbac = PaSDqc.PSDTools.SamplePSD.load_from_file("../data/intro_PSDs/example_MALBAC.spec", name='MALBAC')
sample_DOP = PaSDqc.PSDTools.SamplePSD.load_from_file("../data/intro_PSDs/example_PCR.spec", name='DOP')
sample_bulk = PaSDqc.PSDTools.SamplePSD.load_from_file("../data/intro_PSDs/example_bulk.spec", name='Bulk')
DOP_bulk = PaSDqc.PSDTools.SamplePSD.load_from_file("../data/intro_PSDs/PCR_bulk.chroms.spec", name='PCR Bulk')
freq = sample_mda.freq
freq2 = sample_DOP.freq
avg_mda = sample_mda.avg_PSD()
avg_bulk = sample_bulk.avg_PSD()
avg_malbac = sample_malbac.avg_PSD()
avg_dop = sample_DOP.avg_PSD()
bulk_dop = DOP_bulk.avg_PSD()
"""
Explanation: Load the PSD data using the PaSDqc API
End of explanation
"""
mda_norm = PaSDqc.PSDTools.normalize_psd(avg_mda)
malbac_norm = PaSDqc.PSDTools.normalize_psd(avg_malbac)
dop_norm = PaSDqc.PSDTools.normalize_psd(avg_dop, '../data/intro_PSDs/PCR_bulk.chroms.spec')
"""
Explanation: Normalize the single-cell samples using an idealized bulk for MDA and MALBAC and the associated PCR bulk for the DOP-PCR sample
End of explanation
"""
freq2_cut = freq2[freq2<=5e-3]
avg_sns_cut = avg_dop[freq2<=5e-3]
sns_norm_cut = bulk_dop[freq2<=5e-3]
freq2_small = freq2[freq2>5e-3]
avg_sns_small = avg_dop[freq2>5e-3]
sns_norm_small = bulk_dop[freq2>5e-3]
period = 1 / freq
"""
Explanation: Some manipulations of the DOP-PCR sample since it includes more frequencies than the other samples
End of explanation
"""
df_naive = pd.read_table("../data/intro_PSDs/naive_spectra.txt")
sns.set_context("poster")
sns.set_style("ticks", {'ytick.minor.size': 0.0, 'xtick.minor.size': 0.0})
fig = plt.figure(figsize=(10, 10))
ax0 = plt.subplot(211)
ax1 = plt.subplot(212)
cp = sns.color_palette()
## PaSDqc power spectral density plots
ax0.plot(period, 10*np.log10(avg_bulk), label='Bulk')
ax0.plot(period, mda_norm, label='MDA')
ax0.plot(period, malbac_norm, label='MALBAC', color=cp[3])
ax0.plot(1/freq2_cut, 10*np.log10(avg_sns_cut/sns_norm_cut), label='DOP-PCR', color=cp[4])
ax0.plot(1/freq2_small, 10*np.log10(avg_sns_small/sns_norm_small), color=cp[4], alpha=0.5)
# Vertical lines
ax0.plot((92804, 92804), (-8, 20), 'k--', linewidth=2)
ax0.plot((5e3, 5e3), (-8, 20), 'k--', linewidth=2)
ax0.plot((1e3, 1e3), (-8, 20), 'k--', linewidth=2)
# ax0.plot((92804, 92804), (-10, 50), 'k--', linewidth=2)
# ax0.plot((5e3, 5e3), (-10, 50), 'k--', linewidth=2)
# ax0.plot((1e3, 1e3), (-10, 50), 'k--', linewidth=2)
# Plot adjustments
ax0.set_xlim(1e2, 1e6)
ax0.set_ylim(-10, 20)
ax0.legend(loc=(0.01, 0.55))
# ax0.set_ylim(-15, 50)
# ax0.legend(loc=(0.01, 0.6))
ax0.set_xscale('log')
ax0.set_xticks([1e2, 1e3, 1e4, 1e5, 1e6])
ax0.set_xticklabels(["100 bp", "1 kb", "10 kb", "100 kb", "1 mb"])
ax0.set_ylabel("Power Spectral Density (dB)")
# Labels
ax0.text(1.25e5, 18, "Supra-amplicon\n variance", fontsize=16)
ax0.text(1.25e4, 18, " MDA\n Amplicon\nsize range", fontsize=16)
ax0.text(1.1e3, 18, " MALBAC\n Amplicon\n size range", fontsize=16)
ax0.text(1.5e2, 18, "Paired-end\ncorrelation", fontsize=16)
# Naive PSD plots
ax1.plot(1/df_naive.freq, df_naive.psd_Bulk, label='Bulk')
ax1.plot(1/df_naive.freq, df_naive.psd_MDA, label='MDA')
ax1.plot(1/df_naive.freq, df_naive.psd_MALBAC, label='MALBAC', color=cp[3])
ax1.set_xscale('log')
ax1.set_xlim(1e2, 1e6)
ax1.set_ylim(-20, 35)
ax1.legend(loc=(0.01, 0.7))
ax1.set_xlabel('Genomic scale')
ax1.set_ylabel('Power spectral density (dB)')
ax1.plot((92804, 92804), (-15, 30), 'k--', linewidth=2)
ax1.plot((5e3, 5e3), (-15, 30), 'k--', linewidth=2)
ax1.plot((1e3, 1e3), (-15, 30), 'k--', linewidth=2)
ax1.set_ylim(-20, 30)
ax1.set_xticklabels(["0", "100 bp", "1 kb", "10 kb", "100 kb", "1 mb"])
# Figure Layout
plt.tight_layout()
sns.despine(fig=fig, ax=ax0)
sns.despine(fig=fig, ax=ax1)
fig.text(0.01, 0.98, "A", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
fig.text(0.01, 0.49, "B", weight="bold", horizontalalignment='left', verticalalignment='center', fontsize=20)
"""
Explanation: Load naive spectra from file
End of explanation
"""
|
HarshaDevulapalli/foundations-homework
|
08/Homework 8 - Dataset - Devulapalli.ipynb
|
mit
|
#Starting out the basics.
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
slums= pd.read_csv("hyderabad_slum_master.csv")
slums.head() #The dataset is a spatialised one, hence the_geom column.
"""
Explanation: Homework 8: Dataset: Slums of Hyderabad
The following dataset is a heavily cleaned up version of the official data on slums from the Government's Rajiv Awas Yojana Scheme's Slum Free City Action Plan of 2011. This dataset is the only geocoded data for slums in India. While the quality of data could be questionable, it still provides an insightful understanding of the spatial patterns of poverty in Indian cities.
End of explanation
"""
slums.columns
"""
Explanation: Now let's look at the columns in the database.
End of explanation
"""
totalpopulation=slums['population'].sum()
print("The total number of people who live in slums in Hyderabad is",int(totalpopulation))
print("")
populationofhyderabad=6731790 #According to the 2011 census.
percentageofpopulation=(totalpopulation/populationofhyderabad)*100
print("The percentage of people who live in slums is",(percentageofpopulation))
"""
Explanation: So there are the following things in the dataset.
* Name of the Slums
The Municipal Circle and Ward Number
The number of Households and Population slumwise.
The number of Households that are Below Poverty Line (BPL) - It means there are people in slums who are not poor as defined by the government's definition. This becomes crucial because - it is often assumed that the slum is a proxy for urban poverty. Gautam Bhan and Arindam Jana's paper argues otherwise (http://epw.yodasoft.com/journal/2015/22/review-urban-affairs-review-issues/reading-spatial-inequality-urban-india.html)
Caste Parameters : In Indian cities, Caste is an important index. The households that are "General", "Scheduled Caste", "Scheduled Tribe" or "Other Backward Castes" are all counted and have their own columns.
Tenure Parameters: Because the dataset is primarily about eliminating slums with the idea of providing Government built houses, the land tenure status of the slum households is essential to know. They are Patta, Possession Certificate, Slums on Private Land, Slums on Public Land, Households that are renters and Other tenures that do not fit under this framework.
Structure of the House : This pertains to what kind of a house the slum dweller lives on. Pucca means a stable house made out of concrete, Semipucca could mean a not so stable house and Kuccha house means a house made out of mud/wood/etc.
Average Monthly Income, Expenditure and Debt.
The number of years have the dwellers stayed in the slums
Number of Female Headed Households.
1. What is the total number of slums in Hyderabad? How many people live in slums? What percentage of the city's population lives in slums?
End of explanation
"""
print("The total number of slum households is",slums['households'].sum())
print("")
print("The average number of households in a slum is",slums['households'].mean())
print("")
print("The average family size is", (slums['population'].sum()/slums['households'].sum()))
"""
Explanation: Approximately a quarter of the city's population lives in slums.
2. What is the average number of households in a slum? What is the average family size?
End of explanation
"""
circle_count=slums['circle_number'].value_counts()
circle_count
plt.style.use('ggplot')
circle_count.plot(kind='bar',x='Circle Number',y='Number of Slums',legend=False, figsize=(10,10))
"""
Explanation: 3. Which circles have the maximum number of slums?
Circle here refers to a Municipal Subdivision. The city is divided into 18 circles, with each circle being further subdivided into wards. The city has 150 of these wards.
End of explanation
"""
slums['ward_number'].value_counts().head(20)
"""
Explanation: Circle 4, the old city quarter of Hyderabad, has the maximum number of slums.
4. Which wards have the maximum number of slums?
End of explanation
"""
print("The total percentage of literacy in slums is",(slums['literacy_literates'].sum())/(slums['population'].sum())*100)
"""
Explanation: Because the data is faulty, there are slums whose ward number is empty, i.e. 0. The remainder of the list shows the ward numbers with the most slums and, on the right, the number of slums in them. That makes ward 108 the ward with the maximum number of slums. Inspecting it on a map (in CartoDB), one is curious why this ward has such a highly fragmented set of slums, often not more than a couple of houses each.
5. What is the total literacy level in the slums?
We count the total number of literates in each slum and compare it with the total population.
End of explanation
"""
(slums['number_of_bpl_households'].sum())/(slums['households'].sum())*100
"""
Explanation: 6. What percentage of households in the slum are BPL(Below Poverty Level)?
As argued above, there are valid concerns that the slum might not be a great proxy for urban poverty. But if the percentage of households that are Below Poverty Line is high enough, then the slum can be considered a reasonable proxy for urban poverty.
End of explanation
"""
slums['percentageofliterates']=slums['literacy_literates']/slums['population']*100
slums['percentageofliterates'].mean()
"""
Explanation: That is high enough for us to consider the slum as a decent proxy for urban poverty in Hyderabad.
The remaining data, in its absolute-number form, makes it hard to compare slums with each other. It would be useful to convert the caste, tenure, years-of-stay and structure counts into percentages, making them easy to compare.
Now, we create new columns in the dataframe using the existing columns and check.
End of explanation
"""
slums['percentageofbplhouseholds']=slums['number_of_bpl_households']/slums['households']*100
"""
Explanation: On closer inspection, it turns out that the other columns are per household and not population-wise like the literacy percentage. So we add the following columns to the dataframe, repeating the above calculation for the percentage of households below the poverty line.
End of explanation
"""
slums['percentageofgeneral'] = slums['caste_general']/slums['households']*100
slums['percentageofminority']= slums['minority']/slums['households']*100
slums['percentageofobc'] = slums['caste_obc']/slums['households']*100
slums['percentageofsc'] = slums['caste_sc']/slums['households']*100
slums['percentageofst'] = slums['caste_st']/slums['households']*100
"""
Explanation: Now we repeat this for the Caste Parameters.
End of explanation
"""
slums['percentageofpuccastructures'] = slums['structure_pucca']/slums['households']*100
slums['percentageofsemipuccastructures'] = slums['structure_semipucca']/slums['households']*100
slums['percentageofkucchastructures'] = slums['structure_kuccha']/slums['households']*100
"""
Explanation: And for the type of structures..
End of explanation
"""
slums['percentageoftenure_patta'] = slums['tenure_patta']/slums['households']*100
slums['percentageoftenure_pc'] = slums['tenure_possession_certificate']/slums['households']*100
slums['percentageoftenure_private'] = slums['tenure_private_land']/slums['households']*100
slums['percentageoftenure_public'] = slums['tenure_public']/slums['households']*100
slums['percentageoftenure_renters'] = slums['tenure_rented']/slums['households']*100
slums['percentageoftenure_other'] = slums['tenure_others']/slums['households']*100
"""
Explanation: .. And for the kind of tenure
End of explanation
"""
slums['percentageoftenure_0to1'] = slums['zerotoone_years_of_stay']/slums['households']*100
slums['percentageoftenure_1to3'] = slums['onetothree_years_of_stay']/slums['households']*100
slums['percentageoftenure_3to5'] = slums['threetofive_years_of_stay']/slums['households']*100
slums['percentageoftenure_morethan5'] = slums['morethanfive_years_of_stay']/slums['households']*100
"""
Explanation: and finally for the number of years the residents have stayed..
End of explanation
"""
slums.head()
slums.columns
"""
Explanation: Now, let us see what the slums dataframe looks like
End of explanation
"""
slums.sort_values(by='percentageofgeneral', ascending=False).head(5)
"""
Explanation: We now have the data with all the calculations necessary made for analysis and now we begin.
CASTE PARAMETERS
7. Which slums have the highest percentage of households that belong to the General Caste?
End of explanation
"""
slums.sort_values(by='percentageofsc', ascending=False).head(5)
"""
Explanation: 8. Which slums have the highest percentage of households that belong to the SC Caste?
End of explanation
"""
slums.sort_values(by='percentageofst', ascending=False).head(5)
"""
Explanation: 9. Which slums have the highest percentage of households that belong to the ST Caste?
End of explanation
"""
slums.sort_values(by='percentageofobc', ascending=False).head(5)
"""
Explanation: 10. Which slums have the highest percentage of households that belong to the OBC Caste?
End of explanation
"""
a=slums['percentageofgeneral'].mean()
b=slums['percentageofsc'].mean()
c=slums['percentageofst'].mean()
d=slums['percentageofobc'].mean()
print(a,b,c,d)
plt.style.use('ggplot')
series=pd.Series([a,b,c,d], index=['General', 'SC', 'ST','OBC'], name='Caste Percentages')
series.plot.pie(figsize=(6, 6))
"""
Explanation: Now, we look at the average caste composition of slum households.
End of explanation
"""
a=slums['percentageofpuccastructures'].mean()
"""
Explanation: STRUCTURES OF HOUSES
Now let us see the number of households that are proper pucca structures:
11. What percentage of Households are Pucca Structures?
End of explanation
"""
b=slums['percentageofsemipuccastructures'].mean()
"""
Explanation: That's roughly about half of them. What about the remaining half?
12. What percentage of Households are Semi-pucca Structures?
End of explanation
"""
c=slums['percentageofkucchastructures'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c], index=['Pucca Houses', 'Semipucca Houses', 'Kuccha Houses'], name='Household Structure Type')
series.plot.pie(figsize=(6, 6))
"""
Explanation: That still leaves out a little bit more... and the remaining structures are made out of..
13. What percentage of Households are Neither Semipucca or Pucca Structures?
End of explanation
"""
a=slums['percentageoftenure_patta'].mean()
"""
Explanation: Kaccha houses.
LAND TENURE DETAILS
14. What percentage of Households have access to Pattas?
End of explanation
"""
b=slums['percentageoftenure_pc'].mean()
"""
Explanation: 15. What percentage of Households have access to Possession Certificates?
End of explanation
"""
c=slums['percentageoftenure_private'].mean()
"""
Explanation: 16. What percentage of Households are on Private Land?
End of explanation
"""
d=slums['percentageoftenure_public'].mean()
"""
Explanation: 17. What percentage of Households are on Public Land?
End of explanation
"""
e=slums['percentageoftenure_renters'].mean()
"""
Explanation: 18. What percentage of Households are Renters?
End of explanation
"""
f=slums['percentageoftenure_other'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c,d,e,f], index=['Pattas', 'PC', 'Private Land','Public Land','Renters','Others'], name='Tenure Type')
series.plot.pie(figsize=(6, 6))
"""
Explanation: 19. What percentage of Households don't fit in any of the other categories?
End of explanation
"""
a=slums['percentageoftenure_0to1'].mean()
"""
Explanation: WHEN HAVE PEOPLE MOVED IN
20. What percentage of Households have moved in the last one year?
End of explanation
"""
b=slums['percentageoftenure_1to3'].mean()
"""
Explanation: 21. What percentage of Households have moved in the last one to three years?
End of explanation
"""
c=slums['percentageoftenure_3to5'].mean()
"""
Explanation: 22. What percentage of Households have moved in the last three to five years?
End of explanation
"""
d=slums['percentageoftenure_morethan5'].mean()
plt.style.use('ggplot')
series=pd.Series([a,b,c,d], index=['Zero to One Years', 'One to Three Years', 'Three to Five Years','More than Five Years'])
series.plot.pie(figsize=(6, 6))
"""
Explanation: 23. What percentage of Households have moved more than five years ago?
End of explanation
"""
slums['avg_monthly_income'].mean()
"""
Explanation: INCOME, EXPENDITURE, DEBT
24. What is the average monthly income of a slum household? (in rupees, 70 rupees make a dollar)
End of explanation
"""
slums['avg_monthly_expenditure'].mean()
"""
Explanation: 25. What is the average monthly expenditure of a slum household? (in rupees, 70 rupees make a dollar)
End of explanation
"""
slums['debts_outstanding'].mean()
"""
Explanation: 26. What is the average debt per slum? (in rupees, 70 rupees make a dollar)
End of explanation
"""
|
Cairo4/pythonkurs
|
03 python II/01 Python II .ipynb
|
mit
|
lst = [11,2,34,4,5,5111]
len([11,2,'sort',4,5,5111])
sorted(lst)
lst.sort()
min(lst)
max(lst)
str(1212)
sum([1,2,2])
lst.remove(4)
lst.append(4)
string = 'hello, wie geht Dir?'
string.split(',')
"""
Explanation: Python II
Review: the most important functions
Much more powerful functions: modules and libraries
Let's take a closer look at these simple functions
Let's build our own functions
Structure and troubleshooting
1 The most important functions
An overview of the 64 most important simple Python functions is listed here.
End of explanation
"""
#these building blocks were developed for us by friendly programmers.
import urllib
import requests
import glob
import pandas
import BeautifulSoup
import re
#etc. etc.
!pip install re
"""
Explanation: 2 Much more powerful functions: modules and libraries
Modules & libraries
End of explanation
"""
import os
#Unfortunately this does not work with all built-in functions
os.path.split??
#Example: sort
def sort(list):
for index in range(1,len(list)):
value = list[index]
i = index-1
while i>=0:
if value < list[i]:
list[i+1] = list[i]
list[i] = value
i -= 1
else:
break
return list
#A really complex one. If you could not work with the urllib module (urlretrieve),
#you would have to type in all of this yourself.
def urlretrieve(url, filename=None, reporthook=None, data=None):
url_type, path = splittype(url)
with contextlib.closing(urlopen(url, data)) as fp:
headers = fp.info()
# Just return the local path and the "headers" for file://
# URLs. No sense in performing a copy unless requested.
if url_type == "file" and not filename:
return os.path.normpath(path), headers
# Handle temporary file setup.
if filename:
tfp = open(filename, 'wb')
else:
tfp = tempfile.NamedTemporaryFile(delete=False)
filename = tfp.name
_url_tempfiles.append(filename)
with tfp:
result = filename, headers
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while True:
block = fp.read(bs)
if not block:
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: got only %i out of %i bytes"
% (read, size), result)
return result
"""
Explanation: 3 But how are functions, modules and libraries built?
End of explanation
"""
lst = ['ich', 'habe', 'ganz', 'kalt']
def join(mylist):
    #"join" and "mylist" are names I chose myself.
long_str = ''
for elem in mylist:
long_str = long_str + ' ' + elem
        # the quotation marks with a space in them make sure there are gaps between the strings when they are joined.
return long_str.strip()
    #".strip()" is a small helper function that removes the spaces at the beginning and end.
"""
Explanation: 4 Let's build our own function
Let's build whole sentences from lists of strings
End of explanation
"""
join(lst)
"""
Explanation: And to call it, I put my list inside the parentheses ()
End of explanation
"""
satz = "Die Unabhängigkeit der Notenbanken von der Politik gilt bisher als anerkannter Grundpfeiler der modernen Wirtschafts- und Geldpolitik in fortgeschrittenen Volkswirtschaften. Zu gross wäre sonst das Risiko, dass gewählte Politiker die Notenpresse anwerfen, wenn es ihren persönlichen Zielen gerade gelegen kommt, und dass dadurch die Stabilität des Geldes und das Vertrauen in das Zahlungsmittel untergraben wird."
satz
def find(string):
elem = input('Bitte geben Sie den Suchbegriff ein: ')
if elem in string:
return 'Treffer'
else:
return 'Kein Treffer'
find(satz)
"""
Explanation: Let's build a simple search
End of explanation
"""
print('Always use this in your code to know exactly where the error happens.')
#Example: sort (with debug prints)
def sort(list):
for index in range(1,len(list)):
value = list[index]
print(value)
i = index-1
print(i)
while i>=0:
if value < list[i]:
list[i+1] = list[i]
list[i] = value
i -= 1
else:
break
return list
sort(lst)
lst
"""
Explanation: 5 Structure and troubleshooting
First the imports
Then your own functions
Then the actual code
End of explanation
"""
|
knowledgeanyhow/notebooks
|
hacks/Webserver in a Notebook.ipynb
|
mit
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy
import io
pd.options.display.mpl_style = 'default'
def plot_random_numbers(n=50):
'''
Plot random numbers as a line graph.
'''
fig, ax = plt.subplots()
# generate some random numbers
arr = numpy.random.randn(n)
ax.plot(arr)
ax.set_title('Random numbers!')
# fetch the plot bytes
output = io.BytesIO()
plt.savefig(output, format='png')
png = output.getvalue()
plt.close()
return png
"""
Explanation: Run a Web Server in a Notebook
In this notebook, we show how to run a Tornado or Flask web server within a notebook, and access it from the public Internet. It sounds hacky, but the technique can prove useful:
To quickly prototype a REST API for an external web application to consume
To quickly expose a simple web dashboard to select external users
In this notebook, we'll demonstrate the technique using both Tornado and Flask as the web server. In both cases, the servers will listen for HTTPS connections and use a self-signed certificate. The servers will not authenticate connecting users / clients. (We want to keep things simple for this demo, but such authentication is an obvious next step in securing the web service for real-world use.)
Define the Demo Scenario
Suppose we have completed a notebook that, among other things, can plot a point-in-time sample of data from an external source. Assume we now want to surface this plot in a very simple UI that has:
The title of the demo
The current plot
A refresh button that takes a new sample and updates the plot
Create the Plotting Function
Suppose we have a function that generates a plot and returns the image as a PNG in a Python string.
End of explanation
"""
from IPython.display import Image
Image(plot_random_numbers())
"""
Explanation: We can test our function by showing its output inline using the Image utility from IPython.
End of explanation
"""
import jinja2
page = jinja2.Template('''\
<!doctype html>
<html>
<head>
<link rel="stylesheet" type="text/css" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.2/css/bootstrap.min.css" />
<title>{{ title }}</title>
</head>
<body>
<nav class="navbar navbar-default">
<div class="container-fluid">
<div class="navbar-header">
<a class="navbar-brand" href="#">{{ title }}</a>
</div>
</div>
</nav>
<div class="container text-center">
<div class="row">
<img src="/plot" alt="Random numbers for a plot" />
</div>
<div class="row">
<button class="btn btn-primary">Refresh Plot</button>
</div>
</div>
<script type="text/javascript" src="//code.jquery.com/jquery-2.1.3.min.js"></script>
<script type="text/javascript">
console.debug('running');
$('button').on('click', function() {
$('img').attr('src', '/plot?'+(new Date().getTime()));
});
</script>
</body>
</html>''')
"""
Explanation: Create a Simple Dashboard Page
Now we'll craft a simple dashboard page that includes our plot. We don't have to do anything fancy here other than use an <img> tag and a <button>. But to demonstrate what's possible, we'll make it pretty with Bootstrap and jQuery, and use a Jinja template that accepts the demo title as a parameter.
Note that the image tag points to a /plot resource on the server. Nothing dictates that we must fetch the plot image from our dashboard page. Another application could treat our web server as an API and use it in other ways.
End of explanation
"""
%%bash
mkdir -p -m 700 ~/.ssh
openssl req -new -newkey rsa:4096 -days 365 -nodes -x509 \
-subj "/C=XX/ST=Unknown/L=Somewhere/O=None/CN=None" \
-keyout /home/notebook/.ssh/notebook.key -out /home/notebook/.ssh/notebook.crt
"""
Explanation: We can now expose both the plotting function and the template via our web servers (Tornado first, then Flask) using the following endpoints:
/ will serve the dashboard HTML.
/plot will serve the plot PNG.
Run Tornado in a Notebook
First we create a self-signed certificate using the openssl command line library. If we had a real cert, we could use it instead.
End of explanation
"""
import tornado.ioloop
import tornado.web
import tornado.httpserver
"""
Explanation: Next we import the Tornado modules we need.
End of explanation
"""
class MainHandler(tornado.web.RequestHandler):
def get(self):
'''Renders the template with a title on HTTP GET.'''
self.finish(page.render(title='Tornado Demo'))
class PlotHandler(tornado.web.RequestHandler):
def get(self):
'''Creates the plot and returns it on HTTP GET.'''
self.set_header('content-type', 'image/png')
png = plot_random_numbers()
self.finish(png)
"""
Explanation: Then we define the request handlers for our two endpoints.
End of explanation
"""
application = tornado.web.Application([
(r"/", MainHandler),
(r"/plot", PlotHandler)
])
"""
Explanation: Now we define the application object which maps the web paths to the handlers.
End of explanation
"""
server = tornado.httpserver.HTTPServer(application, ssl_options = {
"certfile": '/home/notebook/.ssh/notebook.crt',
"keyfile": '/home/notebook/.ssh/notebook.key'
})
server.listen(9000, '0.0.0.0')
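# Optional check (a sketch): the endpoints can also be exercised with the requests library
# from a separate Python session or another machine (verify=False because of the
# self-signed certificate). It is shown commented out because a blocking request issued from
# this notebook would not be answered until the cell finishes, since the kernel and this
# HTTPServer share Tornado's IOLoop.
#
# import requests
# r = requests.get('https://192.168.11.10:9000/plot', verify=False)
# print(r.status_code, r.headers['content-type'])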
"""
Explanation: Finally, we create a new HTTP server bound to a publicly exposed port on our notebook server (e.g., 9000) and using the self-signed certificate with corresponding key.
<div class="alert" style="border: 1px solid #aaa; background: radial-gradient(ellipse at center, #ffffff 50%, #eee 100%);">
<div class="row">
<div class="col-sm-1"><img src="https://knowledgeanyhow.org/static/images/favicon_32x32.png" style="margin-top: -6px"/></div>
<div class="col-sm-11">In IBM Knowledge Anyhow Workbench, ports 9000 through 9004 are exposed on a public interface. We can bind our webserver to any of those ports.</div>
</div>
</div>
End of explanation
"""
server.close_all_connections()
server.stop()
"""
Explanation: To see the result, we need to visit the public IP address of our notebook server. For example, if our IP address is 192.168.11.10, we would visit https://192.168.11.10:9000.
<div class="alert" style="border: 1px solid #aaa; background: radial-gradient(ellipse at center, #ffffff 50%, #eee 100%);">
<div class="row">
<div class="col-sm-1"><img src="https://knowledgeanyhow.org/static/images/favicon_32x32.png" style="margin-top: -6px"/></div>
<div class="col-sm-11">In IBM Knowledge Anyhow Workbench, we can get our public IP address from an environment variable by executing the code below in our notebook:
<pre style="background-color: transparent">import os
os.getenv('HOST_PUBLIC_IP')</pre>
</div>
</div>
</div>
When we visit the web server in a browser and accept the self-signed cert warning, we should see the resulting dashboard. Clicking Refresh Plot in the dashboard shows us a new plot.
Note that since IPython itself is based on Tornado, we are able to run other cells and get output while the web server is running. In fact, we can even modify the plotting function and template and see the changes the next time we refresh the dashboard in our browser.
When we want to shut the server down, we execute the lines below. Restarting the notebook kernel has the same net effect.
End of explanation
"""
!pip install flask
"""
Explanation: Run Flask in a Notebook
The same technique works with Flask, albeit with different pros and cons. First, we need to install Flask since it does not come preinstalled in the notebook environment by default.
End of explanation
"""
from flask import Flask, make_response
flask_app = Flask('flask_demo')
@flask_app.route('/')
def index():
'''Renders the template with a title on HTTP GET.'''
return page.render(title='Flask Demo')
@flask_app.route('/plot')
def get_plot():
'''Creates the plot and returns it on HTTP GET.'''
response = make_response(plot_random_numbers())
response.mimetype = 'image/png'
return response
"""
Explanation: Now we import our Flask requirements, define our app, and create our route mappings.
End of explanation
"""
flask_app.run(host='0.0.0.0', port=9000, ssl_context='adhoc')
"""
Explanation: Finally, we run the Flask web server. Flask supports the generation of an ad-hoc HTTP certificate and key so we don't need to explicitly put one on disk like we did in the case of Tornado.
End of explanation
"""
from tornado.wsgi import WSGIContainer
server = tornado.httpserver.HTTPServer(WSGIContainer(flask_app), ssl_options = {
"certfile": '/home/notebook/.ssh/notebook.crt',
"keyfile": '/home/notebook/.ssh/notebook.key'
})
server.listen(9000, '0.0.0.0')
"""
Explanation: Unlike in the Tornado case, the run command above blocks the notebook kernel from returning for as long as the web server is running. To stop the server, we need to interrupt the kernel (Kernel → Interrupt).
Run Flask in a Tornado WSGIContainer
If we are in love with Flask syntax, but miss the cool, non-blocking ability of Tornado, we can run the Flask application in a Tornado WSGIContainer like so.
End of explanation
"""
server.close_all_connections()
server.stop()
"""
Explanation: And once we do, we can view the dashboard in a web browser even while executing cells in the notebook. When we're done, we can cleanup with the same logic as in the pure Tornado case.
End of explanation
"""
|
ComputationalModeling/spring-2017-danielak
|
past-semesters/fall_2016/day-by-day/day21-traveling-salesman-problem/TravelingSalesman_Problem_SOLUTIONS.ipynb
|
agpl-3.0
|
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import display, clear_output
def calc_total_distance(table_of_distances, city_order):
'''
Calculates distances between a sequence of cities.
Inputs: N x N table containing distances between each pair of the N
cities, as well as an array of length N+1 containing the city order,
which starts and ends with the same city (ensuring that the path is
closed)
Returns: total path length for the closed loop.
'''
total_distance = 0.0
# loop over cities and sum up the path length between successive pairs
for i in range(city_order.size-1):
total_distance += table_of_distances[city_order[i]][city_order[i+1]]
return total_distance
def plot_cities(city_order,city_x,city_y):
'''
Plots cities and the path between them.
Inputs: ordering of cities, x and y coordinates of each city.
Returns: a plot showing the cities and the path between them.
'''
# first make x,y arrays
x = []
y = []
# put together arrays of x and y positions that show the order that the
# salesman traverses the cities
for i in range(0, city_order.size):
x.append(city_x[city_order[i]])
y.append(city_y[city_order[i]])
# append the first city onto the end so the loop is closed
x.append(city_x[city_order[0]])
y.append(city_y[city_order[0]])
#time.sleep(0.1)
clear_output(wait=True)
display(fig) # Reset display
fig.clear() # clear output for animation
plt.xlim(-0.2, 20.2) # give a little space around the edges of the plot
plt.ylim(-0.2, 20.2)
# plot city positions in blue, and path in red.
plt.plot(city_x,city_y, 'bo', x, y, 'r-')
"""
Explanation: The Traveling Salesman problem
Names of group members
// put your names here!
Goals of this assignment
The main goal of this assignment is to use Monte Carlo methods to find the shortest path between several cities - the "Traveling Salesman" problem. This is an example of how randomization can be used to optimize problems that would be incredibly computationally expensive (and sometimes impossible) to solve exactly.
The Traveling Salesman problem
The Traveling Salesman Problem is a classic problem in computer science where the focus is on optimization. The problem is as follows: Imagine there is a salesman who has to travel to N cities. The order is unimportant, as long as he only visits each city once on each trip, and finishes where he started. The salesman wants to keep the distance traveled (and thus travel costs) as low as possible. This problem is interesting for a variety of reasons - it applies to transportation (finding the most efficient bus routes), logistics (finding the best UPS or FedEx delivery routes for some number of packages), or in optimizing manufacturing processes to reduce cost.
The Traveling Salesman Problem is extremely difficult to solve for large numbers of cities - testing every possible combination of cities would take N! (N factorial) individual tests. For 10 cities, this would require 3,628,800 separate tests. For 20 cities, this would require 2,432,902,008,176,640,000 (approximately $2.4 \times 10^{18}$) tests - if you could test one combination per microsecond ($10^{-6}$ s) it would take approximately 76,000 years! For 30 cities, at the same rate testing every combination would take more than one billion times the age of the Universe. As a result, this is the kind of problem where a "good enough" answer is sufficient, and where randomization comes in.
A good local example of a solution to the Traveling Salesman Problem is an optimized Michigan road trip calculated by a former MSU graduate student (and one across the US). There's also a widely-used software library for solving the Traveling Salesman Problem; the website has some interesting applications of the problem!
End of explanation
"""
# number of cities we'll use.
number_of_cities = 30
# seed for random number generator so we get the same value every time!
np.random.seed(2024561414)
# create random x,y positions for our current number of cities. (Distance scaling is arbitrary.)
city_x = np.random.random(size=number_of_cities)*20.0
city_y = np.random.random(size=number_of_cities)*20.0
# table of city distances - empty for the moment
city_distances = np.zeros((number_of_cities,number_of_cities))
# calculate distance between each pair of cities and store it in the table.
# technically we're calculating 2x as many things as we need (as well as the
# diagonal, which should all be zeros), but whatever, it's cheap.
for a in range(number_of_cities):
for b in range(number_of_cities):
city_distances[a][b] = ((city_x[a]-city_x[b])**2 + (city_y[a]-city_y[b])**2 )**0.5
# create the array of cities in the order we're going to go through them
city_order = np.arange(city_distances.shape[0])
# tack on the first city to the end of the array, since that ensures a closed loop
city_order = np.append(city_order, city_order[0])
"""
Explanation: This code sets up everything we need
Given a number of cities, set up random x and y positions and calculate a table of distances between pairs of cities (used for calculating the total trip distance). Then set up an array that controls the order that the salesman travels between cities, and plots out the initial path.
End of explanation
"""
fig = plt.figure()
# Put your code here!
# number of steps we'll take
N_steps = 1000
step = [0]
distance = [calc_total_distance(city_distances,city_order)]
for i in range(N_steps):
swap1 = np.random.randint(1,city_order.shape[0]-2)
swap2 = np.random.randint(1,city_order.shape[0]-2)
orig_distance = calc_total_distance(city_distances,city_order)
new_city_order = np.copy(city_order)
hold = new_city_order[swap1]
new_city_order[swap1] = new_city_order[swap2]
new_city_order[swap2] = hold
new_distance = calc_total_distance(city_distances,new_city_order)
if new_distance < orig_distance:
city_order = np.copy(new_city_order)
step.append(i)
distance.append(new_distance)
plot_cities(city_order,city_x,city_y)
plt.plot(step,distance)
"""
Explanation: Put your code below this!
Your code should take some number of steps, doing the following at each step:
Randomly swap two cities in the array of cities (except for the first/last city)
Check the total distance traversed by the salesman
If the new ordering results in a shorter path, keep it. If not, throw it away.
Plot the shorter of the two paths (the original one or the new one)
Also, keep track of the steps and the minimum distance traveled as a function of number of steps and plot out the minimum distance as a function of step!
End of explanation
"""
from IPython.display import HTML
HTML(
"""
<iframe
src="https://goo.gl/forms/dDkx8yxbMC2aKHJb2?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
"""
Explanation: Assignment wrapup
Please fill out the form that appears when you run the code below. You must completely fill this out in order to receive credit for the assignment!
End of explanation
"""
|
bcantarel/bcantarel.github.io
|
bicf_nanocourses/courses/python_1/lectures/introduction_to_pandas_and_dataframes.ipynb
|
gpl-3.0
|
# Import Pandas and Numpy
import pandas as pd
import numpy as np
"""
Explanation: Intoduction to Pandas and Dataframes
<hr>
Venkat Malladi (Computational Biologist BICF)
Agenda
<hr>
Introduction to Pandas
DataSeries
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Exercise 5
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
1.1 What is Pandas?
<hr>
Description
Python library providing high-performance, easy-to-use data structures and data analysis tools.
Suitable for tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet
Provides data analysis features similar to: R, MATLAB, SAS
Based on NumPy
1.2 Key Features
<hr>
The library is oriented towards table-like data structures that can be manipulated by a collection of methods:
Easy handling of missing data
Label-based slicing, indexing and subsetting of large data sets
Powerful and flexible group by functionality to perform split-apply-combine operations on data sets
Read/Write data from/to Excel, CSV, SQL databases, JSON
1.3 Import Pandas
<hr>
Before we explore the pandas package, let's import pandas.
The convention is to use pd to refer to pandas when importing the package.
End of explanation
"""
# Make Series of count data and visualize the series
counts = pd.Series([223, 43, 53, 24, 43])
counts
"""
Explanation: 1.4 Data Structures
Series
Dataframe
Agenda
<hr>
Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Exercise 5
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
2.1 Series
<hr>
One dimensional array-like object
Contains array of data (of any NumPy data type) with an index that labels each element in the vector.
Indexes can be:
integers
strings
other data types
<div style="background-color: #9999ff; padding: 10px;">NOTE: Not Numpy arrays but adds functionality to a Numpy array </div>
2.2 Series - Integer indexes
<hr>
If an index is not specified, a default sequence of integers is assigned as index.
| Index | Value |
| ------ | ------ |
| 0 | 35 |
| 1 | 50 |
| 2 | 25 |
Make a Series of count data
End of explanation
"""
# What datatype is the counts object?
type(counts)
"""
Explanation: What datatype is the counts object?
End of explanation
"""
# Make Series of count data with Gene Symbols
rna_counts = pd.Series([50, 10, 12, 29, 4], index=['BRCA2', 'GATA2', 'Myc', 'FOXA1', 'ERCC2'])
rna_counts
"""
Explanation: 2.3 Series - String indexes
<hr>
We can assign meaningful labels to the indexes when making the Series object by specifying an index array
| Index | Value |
| ------ | ------ |
| CA | 35 |
| TX | 50 |
| OK | 25 |
Make Series of count data with Gene Symbols
End of explanation
"""
# Construct second sample RNA-counts dict
cell2_counts = {'BRCA2':5, 'GATA2':20, 'Myc':45, 'FOXA1':10, 'ERCC2':0, 'BRCA1': 20}
cell2_counts
"""
Explanation: 2.4 Series - Dictionary
<hr>
Can be thought of as a dict
Can be constructed from a dict directly.
Construct second sample RNA-counts dict
End of explanation
"""
# Make pandas Series from RNA-counts dict
rna_counts_cell2 = pd.Series(cell2_counts)
rna_counts_cell2
"""
Explanation: Make pandas Series from RNA-counts dict
End of explanation
"""
# Access the 1st element of counts data
counts[0]
"""
Explanation: 2.5.1 Series - Referencing Elements - Integer
<hr>
<div style="background-color: #9999ff; padding: 10px;">NOTE: We can access the values just like an array </div>
Integer Index
Access the 1st element of counts data
End of explanation
"""
# Get the 2nd through 4th elements
counts[1:4]
"""
Explanation: Get the 2nd through 4th elements
End of explanation
"""
# Get the counts for Myc Gene
rna_counts['Myc']
"""
Explanation: 2.5.2 Series - Referencing Elements - String
<hr>
String Index
Get the counts for the Myc Gene
End of explanation
"""
# Get the Counts for FOXA1, GATA2 and BRCA2
rna_counts[['FOXA1', 'GATA2', 'BRCA2']]
"""
Explanation: Get the counts for FOXA1, GATA2 and BRCA2
End of explanation
"""
# Get the values in the counts matrix
counts.values
"""
Explanation: 2.5.3 Series - Referencing Elements - array/index values
<hr>
Can get the array representation and index object of the Series via its values and index attributes, respectively.
Get the values in the counts matrix
End of explanation
"""
# Get the index of the rna_counts matrix
rna_counts.index
"""
Explanation: Get the index of the rna_counts matrix
End of explanation
"""
rna_counts.name = 'RNA Counts'
rna_counts.index.name = 'Symbol'
rna_counts
"""
Explanation: 2.5.4 Series - Referencing Elements - labels
<hr>
Can give both the array of values and the index meaningful labels themselves
End of explanation
"""
# Boolean mask: which genes have greater than 20 counts?
rna_counts > 20
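# Other NumPy-style operations also preserve the index, e.g. (a small illustrative sketch):
np.log2(rna_counts + 1)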
"""
Explanation: 2.6 Series - array operations
<hr>
NumPy array operations can be applied to Series without losing the data structure
Use boolean array to filter Series
Select Genes that have greater than 20 counts
End of explanation
"""
# Select genes that have greater than 20 counts
rna_counts[rna_counts > 20]
"""
Explanation: Select genes that have greater than 20 counts
End of explanation
"""
# Make Movie Database with missing values
mcu_opening = {'Black Panther': 202003951, 'Thor: Ragnarok': 122744989, 'Spider-Man: Homecoming': 117027503,
'Guardians of the Galaxy Vol. 2': 146510104, 'Doctor Strange': 85058311,
'Captain America: Civil War': 179139142}
mcu_movies = ['Ant-Man and the Wasp', 'Avengers: Infinity War', 'Black Panther', 'Thor: Ragnarok',
'Spider-Man: Homecoming', 'Guardians of the Galaxy Vol. 2', 'Doctor Strange', 'Captain America: Civil War']
mcu_series = pd.Series(mcu_opening, index=mcu_movies)
mcu_series
"""
Explanation: 2.7 Series - null values
<hr>
Many times Series have missing values that you need to identify and clean
Marvel Cinematic Universe
Make a movie Series with missing values
End of explanation
"""
# Find movies with no opening revenue
pd.isnull(mcu_series)
# Good opportunity to use a Boolean filter to get the index and keep only the movie names
mcu_series[pd.isnull(mcu_series)].index.values
"""
Explanation: Find movies with no opening revenue
End of explanation
"""
# Find movies with opening revenue
pd.notnull(mcu_series)
"""
Explanation: Find movies with opening revenue
End of explanation
"""
# Display only movies with no opening revenue
mcu_series[pd.isnull(mcu_series)].index.values
"""
Explanation: Display only the movies with no opening revenue
End of explanation
"""
rna_counts
rna_counts_cell2
"""
Explanation: 2.8 Series - auto alignment
<hr>
Index labels are used to align (merge) data when used in operations with other Series objects
End of explanation
"""
# Combine counts for 2 cells
rna_counts + rna_counts_cell2
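# The label 'BRCA1' only exists in the second Series, so its sum above is NaN. If a missing
# count should be treated as zero instead, the add() method with fill_value is one option
# (a small sketch):
rna_counts.add(rna_counts_cell2, fill_value=0)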
"""
Explanation: Combine counts for 2 cells (rna_counts and rna_counts_cell2)
End of explanation
"""
# Sample Python data and labels:
students = ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas']
test_scores = [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19]
s_scores = pd.Series(test_scores, index=students)
# Which Students have scores greater than 15?
# Bonus: How would you get the students with scores greater than 15 and less than 20?
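# One possible solution (a sketch, not the official answer key):
print(s_scores[s_scores > 15])
# Bonus: combine both conditions with the bitwise & operator (note the parentheses)
print(s_scores[(s_scores > 15) & (s_scores < 20)])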
"""
Explanation: Adding Series combines values that share the same label in the resulting series
Contrast this with arrays, where arrays of the same length combine values element-wise
Notice the missing values: if a label is missing from one Series (or its value is NaN), the result for that label is a missing value.
<div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 1</div>
<hr>
Create a Series from the specified data, using the student names as index labels and the test scores as values.
Which students have scores greater than 15?
Bonus: How would you get the students with scores greater than 15 and less than 20?
<div style="background-color: #9999ff; padding: 10px;">Hint: Will use the bitwise opperator & </div>
| Names | Value |
| ------ | ------ |
| Anastasia | 12.5 |
| Dima | 9 |
| Katherine | 16.5 |
| James | NaN |
| Emily | 9 |
| Michael | 20 |
| Matthew | 14.5 |
| Laura | NaN |
| Kevin | 8 |
| Jonas | 19 |
End of explanation
"""
# What is the mean, median and max test scores?
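# One possible solution (a sketch): Series provides these statistics directly,
# skipping missing values by default.
print(s_scores.mean())
print(s_scores.median())
print(s_scores.max())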
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 2</div>
What is the mean, median and max test scores?
End of explanation
"""
# Make Dataframe of Marvel data
mcu_data = {'Title': ['Ant-Man and the Wasp', 'Avengers: Infinity War', 'Black Panther', 'Thor: Ragnarok',
'Spider-Man: Homecoming', 'Guardians of the Galaxy Vol. 2'],
'Year':[2018, 2018, 2018, 2017, 2017, 2017],
'Studio':['Beuna Vista', 'Beuna Vista', 'Beuna Vista', 'Beuna Vista', 'Sony', 'Beuna Vista'],
'Rating': [np.nan, np.nan, 0.96, 0.92, 0.92, 0.83]}
df_mcu = pd.DataFrame(mcu_data)
df_mcu
"""
Explanation: Agenda
<hr>
Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Exercise 5
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
3.1 Dataframe
<hr>
A DataFrame is a tabular data structure, comprised of rows and columns like in a spreadsheet
Each column can be a different value type (numeric, string, boolean etc)
| Title | Year | Studio | Rating |
| :------: | :------: | :------: | :------: |
| Ant-Man and the Wasp | 2018| Beuna Vista | NaN|
| Avengers: Infinity War | 2018 | Beuna Vista | NaN|
| Black Panther | 2018 | Beuna Vista | 0.96 |
| Thor: Ragnarok | 2017 | Beuna Vista | 0.92|
| Spider-Man: Homecoming| 2017 | Sony | 0.92|
| Guardians of the Galaxy Vol. 2| 2017| Beuna Vista | 0.83 |
3.2 Dataframe - from dict of lists
<hr>
dict keys: columns
dict values (arrays): rows
Make Dataframe of Marvel data
End of explanation
"""
# What datatype is df_mcu?
type(df_mcu)
"""
Explanation: What datatype is df_mcu?
End of explanation
"""
# Assign column order and index based on Marvel Cinematic Universe movie number
mcu_index = ['mcu_20','mcu_19', 'mcu_18', 'mcu_17', 'mcu_16', 'mcu_15']
mcu_columns = ['Title', 'Year', 'Studio', 'Rating']
df_mcu = pd.DataFrame(mcu_data, columns = mcu_columns, index = mcu_index)
df_mcu
"""
Explanation: 3.3 Dataframe - specifying indices and columns
<hr>
Order of columns/rows can be specified using:
columns array
index array
End of explanation
"""
# Make Dataframe of population
pop = {'Nevada': {2001: 2.9, 2002: 2.9}, 'Ohio': {2002: 3.6, 2001: 1.7, 2000: 1.5}}
df_pop = pd.DataFrame(pop)
df_pop
"""
Explanation: 3.4 Dataframe - from nested dict of dicts
<hr>
Outer dict keys as columns and inner dict keys as row indices
Make Dataframe of population of states
End of explanation
"""
# Get the number of rows in a Dataframe
len(df_mcu)
"""
Explanation: 3.5 Dataframe - number of rows and columns
<hr>
Get the number of rows in a Dataframe
End of explanation
"""
# Get the (rows, cols) of the Dataframe
df_mcu.shape
"""
Explanation: Get the (rows, cols) of the Dataframe
End of explanation
"""
# Get the column headers
df_mcu.columns
"""
Explanation: 3.6 Dataframe - index, columns and values
<hr>
Get the column headers
End of explanation
"""
# Get the row index values
df_mcu.index
"""
Explanation: Get the row index values
End of explanation
"""
# Get values of the Dataframe only
df_mcu.values
"""
Explanation: Get values of the Dataframe only
End of explanation
"""
# Select values in a single column
df_mcu['Title']
"""
Explanation: 3.7 Dataframe - Selecting Columns and Rows
<hr>
There are three basic ways to access the data in the Dataframe:
Quick Access: DataFrame[]
Integer position based selection method: DataFrame.iloc[row, col]
Label based selection method: DataFrame.loc[row, col]
3.7.1 Dataframe - Selecting Columns and Rows - Quick Access
<hr>
Select values in a single column
End of explanation
"""
# Select values in a list of columns
df_mcu[['Title', 'Rating']]
"""
Explanation: Select values in a list of columns
End of explanation
"""
# Use slice to get the first n rows (NumPy style indexing)
df_mcu[:2]
"""
Explanation: Use slice to get the first n rows (NumPy style indexing)
End of explanation
"""
# Can combine slice and column selection to select the first n rows
df_mcu['Title'][:2]
"""
Explanation: Can combine slice and column selection to select the first n rows
End of explanation
"""
df_mcu[:4]['Year']
"""
Explanation: Order of column and slice doesn't matter
End of explanation
"""
# Return values in the first row
df_mcu.iloc[0]
# Return values in the first row and second column
df_mcu.iloc[0,1]
"""
Explanation: 3.7.2 Dataframe - Selecting Columns and Rows - Integer based selection
<hr>
iloc is primarily integer-position based (from 0 to length-1 of the axis, with negative indexing counting from the end), but may also be used with a boolean array.
Allowed inputs are:
Integer
End of explanation
"""
# Return values in the 3,5 and 6th rows
df_mcu.iloc[[2,4,5]]
"""
Explanation: A list or array of integers
End of explanation
"""
# Return values in the first row and columns 2 and 3
df_mcu.iloc[:2, 1:3]
"""
Explanation: A slice object with ints
End of explanation
"""
# Select all values of the 20th Movie
df_mcu.loc['mcu_20']
"""
Explanation: 3.7.3 Dataframe - Selecting Columns and Rows - Label based selection
<hr>
loc is primarily label based, but may also be used with a boolean array.
Allowed inputs are:
A single label
End of explanation
"""
# Select all values of the 20th, 17th and 15th movie, which uses row index values,
# Not to be confused with df_mcu[['Title', 'Rating']] which uses column headers
df_mcu.loc[['mcu_20', 'mcu_17', 'mcu_15']]
"""
Explanation: A list or array of labels
End of explanation
"""
# Select the Year and Rating
df_mcu.loc[:, ['Year', 'Rating']]
"""
Explanation: A slice object with labels
<div style="background-color: #9999ff; padding: 10px;"> NOTE: Unlike numeric index python slices, both the start and the stop are included!</div>
End of explanation
"""
# Filter for Rating < .95
df_mcu.loc[df_mcu['Rating'] < .95, :]
"""
Explanation: 3.7.4 Dataframe - Selecting Columns and Rows - Data Filtering
<hr>
Data filtering using boolean
Filter and select on single condition
End of explanation
"""
# Filter for Rating < .95 or Studio is Sony
# Reuse the bitwise comparator seen earlier but with OR instead of AND.
df_mcu.loc[(df_mcu['Rating'] < .95) | (df_mcu['Studio'] == 'Sony'), :]
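# For comparison (illustrative only), an AND filter uses & instead of |,
# e.g. movies from 2017 with a Rating above 0.9:
# df_mcu.loc[(df_mcu['Year'] == 2017) & (df_mcu['Rating'] > 0.9), :]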
"""
Explanation: Filter and select on multiple conditions
End of explanation
"""
# Add new predicted rating to Dataframe
df_mcu['Predicted Rating'] = np.random.random(len(df_mcu))
df_mcu
"""
Explanation: 3.8 Dataframe - Adding and Deleting data
<hr>
Add a new column
End of explanation
"""
# Add a new row for a new movie
new_row = pd.Series(['Captain Marvel', 2019, 'Buena Vista', np.nan, np.random.random(1)[0]], index=df_mcu.columns, name='mcu_21')
df_mcu.append(new_row)
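# Note: append returns a new DataFrame, so to keep the new row you would assign it back,
# e.g. df_mcu = df_mcu.append(new_row)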
"""
Explanation: Add a new row
End of explanation
"""
# Drop the Rating Column
df_mcu.drop('Rating', axis=1)
"""
Explanation: Drop an existing column
End of explanation
"""
# Drop the 17 and 19th movies
df_mcu.drop(['mcu_15', 'mcu_17'])
"""
Explanation: Drop an existing row
End of explanation
"""
# Sample Python data:
exam_data = {'Names': ['Anastasia', 'Dima', 'Katherine', 'James', 'Emily', 'Michael', 'Matthew', 'Laura', 'Kevin', 'Jonas'],
'Scores': [12.5, 9, 16.5, np.nan, 9, 20, 14.5, np.nan, 8, 19],
'Attempts': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
'Qualify': ['yes', 'no', 'yes', 'no', 'no', 'yes', 'yes', 'no', 'no', 'yes']}
# Select the students that qualify based on the Qualify column being 'yes'
"""
Explanation: Columns, Rows or individual elements can be modified similarly using loc or iloc.
<div style="background-color: #9999ff; padding: 10px;"> NOTE: Return new object Dataframe, withouth changing the orignal dataframe (append, drop).</div>
<div style="background-color:yellow; padding: 10px"><h3><span></span>Exericse 3</div>
<hr>
Create a Dataframe from the specified dictionary of exam data below.
Which students have a score between 15 and 20?
| Names | Scores | Attempts | Qualify |
| ------ | ------ | ------ | ------ |
| Anastasia | 12.5 | 1 | 'yes' |
| Dima | 9 | 3 | 'no' |
| Katherine | 16.5 | 2| 'yes' |
| James | NaN | 3| 'no' |
| Emily | 9 | 2| 'no' |
| Michael | 20 | 3| 'yes' |
| Matthew | 14.5 | 1| 'yes' |
| Laura | NaN | 1| 'no' |
| Kevin | 8 | 2| 'no' |
| Jonas | 19 | 1| 'yes' |
End of explanation
"""
# Add a new column, Grade Level, to indicate which grade in high school each student is in
# Add a new student named Jack (Hint: use ignore_index=True)
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 4</div>
<hr>
Add a new column, Grade Level, to indicate which grade in high school each student is in.
Add a new student named Jack. (Hint: use ignore_index=True)
End of explanation
"""
# Add a new column of Pass that is either 0 or 1 based on the column Qualify. (Hint: use numpy.where)
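# For reference, numpy.where works like a vectorized if/else:
# np.where(condition, value_if_true, value_if_false)
# e.g. np.where(np.array([3, 15, 8]) > 10, 1, 0) returns array([0, 1, 0])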
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 5</div>
<hr>
Add a new column of Pass that is either 0 or 1 based on the column Qualify. (Hint: use numpy.where)
- 'yes': 1
- 'no': 0
End of explanation
"""
# Read in Winter Olympic Medal Winners
winter_olympics = pd.read_csv('data/winter_olympics.csv')
winter_olympics.head()
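# The same reader handles other delimiters via the sep argument, and to_csv writes data back out.
# For example (hypothetical file paths):
# df = pd.read_csv('data/some_file.tsv', sep='\t')
# winter_olympics.to_csv('data/winter_olympics_copy.csv', index=False)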
"""
Explanation: Agenda
<hr>
Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
4.1 Import and Store Data
<hr>
The first step in any problem is identifying what format your data is in, and then loading it into whatever framework you're using. Common formats are:
- Text Files: text files with a common delimiter to separate values (e.g. CSV uses ,)
- JSON (JavaScript Object Notation): a standard format for sending data over HTTP requests
- Web Page: XML and HTML
- Binary: "pickle" format and HDF5
- Database: MySQL, PostgreSQL
<div style="background-color: #9999ff; padding: 10px;"> NOTE: Most common is CSV or Text files with different delimeters. </div>
4.2 Import and Store Data - Text Files
<hr>
Reading
read_csv: uses a comma (,) delimiter to read a file
read_table: uses a tab (\t) delimiter to read a file
Writing
to_csv: uses a comma (,) delimiter to write a file
to_csv(sep='\t'): writes a tab (\t) delimited file
Read in the Winter Olympic Medal Winners from Kaggle
End of explanation
"""
# Get the First 3 lines of a Dataframe
winter_olympics.head(3)
"""
Explanation: Agenda
<hr>
Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
5.1 Summarizing and Computing Descriptive Statistics
<hr>
Pandas has a lot of essential functionality built into its data structures to help explore the data.
5.2 Summarizing and Computing Descriptive Statistics - Head and Tail
<hr>
To view a small sample of a Series or DataFrame object, use:
head()
tail()
<div style="background-color: #9999ff; padding: 10px;"> NOTE: The default number of elements to display is five, but you may pass a custom number. </div>
Get the First 3 lines of Dataframe
End of explanation
"""
# Sort Dataframe by rows in ascending order
df_mcu.sort_index(axis=0, ascending=True)
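# Column labels can be sorted as well by passing axis=1:
# df_mcu.sort_index(axis=1)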
"""
Explanation: 5.3 Summarizing and Computing Descriptive Statistics - Sorting
<hr>
To sort data while exploring it, use:
sort_index(): sort an object by its labels (along an axis)
sort_values(): sort by the values along either axis
<div style="background-color: #9999ff; padding: 10px;"> NOTE: axis: 0 or ‘index’, 1 or ‘columns’ and default is 0 </div>
Sort Dataframe by rows in ascending order
End of explanation
"""
# Sort Dataframe by column in descending order
df_mcu.sort_values(by=['Rating', 'Predicted Rating'], ascending=False)
"""
Explanation: Sort Dataframe by columns in descending order
End of explanation
"""
# Summary Statistics for the Dataframe
df_mcu.describe()
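# describe() summarizes numeric columns by default; include='all' also covers non-numeric columns:
# df_mcu.describe(include='all')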
"""
Explanation: 5.4 Summarizing and Computing Descriptive Statistics - Descriptive statistics
<hr>
Built-in functions calculate values over rows or columns
describe(): returns summary statistics of each column
- For numeric data: mean, std, max, min, 25%, 50%, 75%, etc.
- For non-numeric data: count, unique, most frequent item, etc.
End of explanation
"""
# Mean of the Rating and Predicted Rating Columns
df_mcu.loc[:,['Rating', 'Predicted Rating']].mean()
"""
Explanation: mean()
End of explanation
"""
# Get the variance of the Rating column
df_mcu.loc[:,['Rating']].var()
"""
Explanation: var()
End of explanation
"""
# Drop rows with NaN values
df_mcu.dropna()
"""
Explanation: 5.5 Summarizing and Computing Descriptive Statistics - Missing Data
<hr>
Data comes in many shapes and forms, and Pandas is very flexible in handling missing data:
NaN is the default missing value marker
However, Python None also arises, and we want to treat it as "missing", "not available" or "NA" as well
Drop rows with NaN values
End of explanation
"""
# Fill NaN in Dataframe with a default value
df_mcu.fillna(0)
"""
Explanation: - Fill NaN with a default value
End of explanation
"""
# Fill NaN in Dataframe with a default value, in place
df_mcu.fillna(0, inplace=True)
df_mcu
"""
Explanation: - Use inplace to modify the dataframe instead of returning a new object
End of explanation
"""
# What is the median score of the students on the exam?
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 6</div>
<hr>
What is the median score of the students on the exam?
End of explanation
"""
# Deduct 4 points from everyone that attempted the exam 2 or more times. Replace all Nan scores with 0. (Passing is 12 points)
# Compute the mean. Would the class as a whole pass the test?
# Are there any students that will fail now?
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 7</div>
<hr>
Deduct 4 points from everyone that attempted the exam 2 or more times. Replace all Nan scores with 0.
(Passing is 12 points)
Compute the mean. Would the class as a whole pass the test?
Are there any students that will fail now?
End of explanation
"""
# Groupby year of release and get mean Rating
df_mcu.groupby('Year').mean()
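# Several aggregations can be computed at once with agg, e.g. the mean and count of Rating per Year:
# df_mcu.groupby('Year')['Rating'].agg(['mean', 'count'])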
"""
Explanation: Agenda
<hr>
Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
6.1 Grouped and apply
<hr>
The groupby functionality refers to a process involving one or more of the following steps:
- Splitting the data into groups based on some criteria
- Applying a function to each group independently
6.2 Grouped and apply - Splitting
<hr>
Pandas objects can be split on any of their axes.
- A 'grouping' is provided as a mapping of labels to group names in a GroupBy object
- groupby()
End of explanation
"""
# Apply square to every value in a dataframe
test_data = np.arange(9).reshape(3,-1)
df_test = pd.DataFrame(test_data, index=['r1', 'r2', 'r3'], columns=['c1', 'c2', 'c3'])
df_test
df_test.applymap(np.square)
"""
Explanation: 6.3 Grouped and apply - Apply
<hr>
Apply the same function to every column or row:
- applymap: Apply same function across every cell
- apply: Apply same function to every column (default) or row
- Apply square to every value in a dataframe
End of explanation
"""
# Define a function that computes max minus 2*min
def max_minus_min(x):
return max(x)-(2*min(x))
# Apply a function that subtracts 2 times the min from the max in every column
df_test.apply(max_minus_min)
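# Passing axis=1 applies the same function across each row instead of each column:
# df_test.apply(max_minus_min, axis=1)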
"""
Explanation: - Apply a function that subtracts 2 times the min from the max in every column
End of explanation
"""
# Group students by attempts and find the average score?
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 8</div>
<hr>
Group students by attempts and find the average score?
End of explanation
"""
# Group students by their pass result and report the variance in scores?
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 9</div>
<hr>
Group students by their pass result and report the variance in scores?
End of explanation
"""
# Import maplotlib and setup to display plots notebook
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Introduction to Pandas
Series
Exercise 1
Exercise 2
Dataframe
Exercise 3
Exercise 4
Import and Store Data
Summarizing and Computing Descriptive Statistics
Exercise 6
Exercise 7
Grouped and apply
Exercise 8
Exercise 9
Data Transformation and Normalization
Exercise 10
Exercise 11
Exercise 12
7.1 Data Transformation and Normalization
<hr>
For Machine Learning (ML) and other data analysis it is important to:
- Explore your data
- Standardize the data (transform/normalize) so that different columns become comparable / compatible
7.2 Data Transformation and Normalization - Data Exploration
<hr>
Pandas Dataframes have a built in plot functionality:
- Dataframe.plot()
<div style="background-color: #9999ff; padding: 10px;"> NOTE: We will go into more plotting libraries later in the course. </div>
End of explanation
"""
# Plot the Number of Medals in barchart by country using: plot.bar()
winter_olympics.groupby(['Country'])['Medal'].count().plot.bar()
"""
Explanation: Plot the Number of Medals in barchart by country using: plot.bar()
End of explanation
"""
# In the Winter olympics which country has the most Biathlon medals?
# In the Winter olympics which country has the most Skiing medals?
# And in which event do they have the most Gold medals?
"""
Explanation: Other useful plots:
hist: plot.hist()
scatter: plot.scatter()
boxplot: plot.box()
7.3 Data Transformation and Normalization - Normalization
<hr>
Why Normalize (re-scale)?
- Transform data to obtain a certain distribution, e.g. from lognormal to normal
- Normalize data so different columns become comparable / compatible
Typical normalization approaches (a small sketch follows this list):
- Z-score transformation
- Scale to between 0 and 1
- Trimmed mean normalization
- Vector length transformation
- Quantile normalization
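As a minimal sketch (illustrative only, reusing the Rating column of df_mcu from earlier), a z-score transformation and a 0-to-1 rescaling could look like:
rating = df_mcu['Rating']
rating_zscore = (rating - rating.mean()) / rating.std()                  # z-score transformation
rating_scaled = (rating - rating.min()) / (rating.max() - rating.min())  # scale to between 0 and 1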
Further Resources:
- scikit-learn
Further Resources
<hr>
Resources:
- Pandas 10 min
- Pandas Tutorals
- Pandas Cookbook
<div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 10</div>
<hr>
In the Winter olympics which country has the most Biathlon medals?
In the Winter olympics which country has the most Skiing medals? And in which event do they have the most Gold medals?
End of explanation
"""
# Import the Summer Olympic dataset located in ('data/summer_olympics.csv')
summer_olympics = pd.read_csv('data/summer_olympics.csv')
#Which Olympian has the most medals?
summer_olympics.groupby(['Athlete'])['Medal'].count().sort_values(ascending=False).head()
# Which Olympian has the most Gold medals and for which Country?
summer_olympics.groupby(['Athlete','Medal','Country'])['Medal'].count().sort_values(ascending=False).head()
# Which Olympian has the most Gold medals and for which Sport?
summer_olympics.groupby(['Athlete','Medal','Discipline'])['Medal'].count().sort_values(ascending=False).head()
# Which rows have no values and why?
summer_olympics[pd.isnull(summer_olympics).any(axis=1)]
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 11</div>
<hr>
Import the Summer Olympic dataset located in ('data/summer_olympics.csv')
Which Olympian has the most medals?
Which Olympian has the most Gold medals and for which Country and Sport?
Which rows have no values and why?
End of explanation
"""
# Import the example RNA-seq Count Data in ('data/RNAseq_count_table.txt')
rna_counts = pd.read_csv('data/RNAseq_count_table.txt', sep = '\t', index_col = 0)
rna_counts.head()
# Calculate CPM for each Sample.
rna_cpm = rna_counts.divide(rna_counts.sum(axis=0)).multiply(1000000)
rna_cpm.head()
# Which Gene has the highest average CPM?
rna_cpm.mean(axis=1).sort_values(ascending=False).head()
# What is the correlation between SRR1550986 and SRR1550987?
rna_cpm.corr()
"""
Explanation: <div style="background-color:yellow; padding: 10px"><h3><span></span>Exercise 12</div>
<hr>
Import the example RNA-seq Count Data in ('data/RNAseq_count_table.txt')
Calculate CPM for each Sample. (CPM, Counts Per Million)
Formula for CPM = readsMappedToGene x 1/totalNumReads x 10^6
totalNumReads - total number of mapped reads of a sample
readsMappedToGene - number of reads mapped to a selected gene
Which Gene has the highest average CPM?
What is the correlation between SRR1550986 and SRR1550987?
End of explanation
"""
|
gklambauer/SelfNormalizingNetworks
|
getSELUparameters.ipynb
|
gpl-3.0
|
import numpy as np
from scipy.special import erf,erfc
from sympy import Symbol, solve, nsolve
"""
Explanation: Obtain the SELU parameters for arbitrary fixed points
Author: Guenter Klambauer, 2017
tested under Python 3.5
End of explanation
"""
def getSeluParameters(fixedpointMean=0,fixedpointVar=1):
""" Finding the parameters of the SELU activation function. The function returns alpha and lambda for the desired fixed point. """
import sympy
from sympy import Symbol, solve, nsolve
aa = Symbol('aa')
ll = Symbol('ll')
nu = fixedpointMean
tau = fixedpointVar
mean = 0.5*ll*(nu + np.exp(-nu**2/(2*tau))*np.sqrt(2/np.pi)*np.sqrt(tau) + \
nu*erf(nu/(np.sqrt(2*tau))) - aa*erfc(nu/(np.sqrt(2*tau))) + \
np.exp(nu+tau/2)*aa*erfc((nu+tau)/(np.sqrt(2*tau))))
var = 0.5*ll**2*(np.exp(-nu**2/(2*tau))*np.sqrt(2/np.pi*tau)*nu + (nu**2+tau)* \
(1+erf(nu/(np.sqrt(2*tau)))) + aa**2 *erfc(nu/(np.sqrt(2*tau))) \
- aa**2 * 2 *np.exp(nu+tau/2)*erfc((nu+tau)/(np.sqrt(2*tau)))+ \
aa**2*np.exp(2*(nu+tau))*erfc((nu+2*tau)/(np.sqrt(2*tau))) ) - mean**2
eq1 = mean - nu
eq2 = var - tau
res = nsolve( (eq2, eq1), (aa,ll), (1.67,1.05))
return float(res[0]),float(res[1])
### To recover the parameters of the SELU with mean zero and unit variance
getSeluParameters(0,1)
### To obtain new parameters, e.g. for a fixed point with mean -0.1 and variance 2
myFixedPointMean = -0.1
myFixedPointVar = 2.0
myAlpha, myLambda = getSeluParameters(myFixedPointMean,myFixedPointVar)
getSeluParameters(myFixedPointMean,myFixedPointVar)
"""
Explanation: Function to obtain the parameters for the SELU with arbitrary fixed point (mean variance)
End of explanation
"""
def selu(x):
with ops.name_scope('elu') as scope:
alpha = myAlpha
scale = myLambda
return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))
def dropout_selu(x, rate, alpha= -myAlpha*myLambda, fixedPointMean=myFixedPointMean, fixedPointVar=myFixedPointVar,
noise_shape=None, seed=None, name=None, training=False):
"""Dropout to a value with rescaling."""
def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
keep_prob = 1.0 - rate
x = ops.convert_to_tensor(x, name="x")
if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
raise ValueError("keep_prob must be a scalar tensor or a float in the "
"range (0, 1], got %g" % keep_prob)
keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
if tensor_util.constant_value(keep_prob) == 1:
return x
noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
random_tensor = keep_prob
random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
binary_tensor = math_ops.floor(random_tensor)
ret = x * binary_tensor + alpha * (1-binary_tensor)
a = tf.sqrt(fixedPointVar / (keep_prob *((1-keep_prob) * tf.pow(alpha-fixedPointMean,2) + fixedPointVar)))
b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
ret = a * ret + b
ret.set_shape(x.get_shape())
return ret
with ops.name_scope(name, "dropout", [x]) as name:
return utils.smart_cond(training,
lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
lambda: array_ops.identity(x))
import tensorflow as tf
import numpy as np
from __future__ import absolute_import, division, print_function
import numbers
from tensorflow.contrib import layers
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.layers import utils
x = tf.Variable(tf.random_normal([10000],mean=myFixedPointMean, stddev=np.sqrt(myFixedPointVar)))
w = selu(x)
y = dropout_selu(w,0.2,training=True)
init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(init)
z,zz, zzz = sess.run([x, w, y])
#print(z)
#print(zz)
print("mean/var should be at:", myFixedPointMean, "/", myFixedPointVar)
print("Input data mean/var: ", "{:.12f}".format(np.mean(z)), "/", "{:.12f}".format(np.var(z)))
print("After selu: ", "{:.12f}".format(np.mean(zz)), "/", "{:.12f}".format(np.var(zz)))
print("After dropout mean/var", "{:.12f}".format(np.mean(zzz)), "/", "{:.12f}".format(np.var(zzz)))
"""
Explanation: Adjust the SELU function and Dropout to your new parameters
End of explanation
"""
myAlpha = -np.sqrt(2/np.pi) / (np.exp(0.5) * erfc(1/np.sqrt(2))-1 )
myLambda = (1-np.sqrt(np.exp(1))*erfc(1/np.sqrt(2))) * \
np.sqrt( 2*np.pi/ (2 + np.pi -2*np.sqrt(np.exp(1))*(2+np.pi)*erfc(1/np.sqrt(2)) + \
np.exp(1)*np.pi*erfc(1/np.sqrt(2))**2 + 2*np.exp(2)*erfc(np.sqrt(2))))
print("Alpha parameter of the SELU: ", myAlpha)
print("Lambda parameter of the SELU: ", myLambda)
"""
Explanation: For completeness: These are the correct expressions for mean zero and unit variance
End of explanation
"""
|
GAMPTeam/vampyre
|
demos/sparse/sparse_lin_inverse_amp.ipynb
|
mit
|
import os
import sys
vp_path = os.path.abspath('../../')
if not vp_path in sys.path:
sys.path.append(vp_path)
import vampyre as vp
"""
Explanation: Sparse Linear Inverse Demo with AMP
In this demo, we illustrate how to use the vampyre package for a simple sparse linear inverse problem. The problem is to estimate a sparse vector z0 from linear measurements of the form y=A.dot(z0)+w where w is Gaussian noise and A is a known linear transform -- a basic problem in compressed sensing. By sparse, we mean that the vector z0 has few non-zero values. Knowing that the vector is sparse can be used for improved reconstruction if an appropriate sparse reconstruction algorithm is used.
There are a large number of algorithms for sparse linear inverse problems. This demo uses the Generalized Approximate Message Passing (GAMP) method, one of several methods that will be included in the vampyre package. In going through this demo, you will learn to:
* Load the vampyre package
* Create synthetic data for a sparse linear inverse problem
* Set up the GAMP method in the vampyre package to perform the estimation for the linear inverse problem
* Measure the mean squared error (MSE) and compare the value to the predicted value from the GAMP method.
* Using the hist_list feature to track variables per iteration of the algorithm.
* Adjust the damping factor for ill-conditioned matrices.
An almost identical demo is available for the Vector AMP (VAMP) method. The VAMP method is more robust and similar to use. You can start on that demo instead.
Importing the Package
First we need to import the vampyre package. Since python does not have relative imports, you need to add the path location for the vampyre package to the system path. In this case, we have specified the path use a relative path location, but you can change this depending on where vampyre is located.
End of explanation
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: We will also load the other packages we will use in this demo. This could be done before the above import.
End of explanation
"""
# Parameters
nz0 = 1000 # number of components of z0
nz1 = 500 # number of measurements z1
# Compute the shapes
zshape0 = (nz0,) # Shape of z0 matrix
zshape1 = (nz1,) # Shape of z1 matrix = shape of y matrix
Ashape = (nz1,nz0) # Shape of A matrix
"""
Explanation: Generating Synthetic Data
We begin by generating synthetic data. The model is:
y = z1 + w, z1 = A.dot(z0)
where z0 and z1 are the unknown vectors, A is the transform, and w is noise. First, we set the dimensions and the shapes of the vectors we will use.
End of explanation
"""
prob_on = 0.1 # fraction of components that are *on*
z0_mean_on = 0 # mean for the on components
z0_var_on = 1 # variance for the on components
snr = 30 # SNR in dB
"""
Explanation: To generate the synthetic data for this demo, we use the following simple probabilistic model. For the input z0, we will use a Bernoulli-Gaussian (BG) distribution, a simple model in sparse signal processing. In the BG model, the components z0[j] are i.i.d., where each component can be on or off.
With probability prob_on, z0[i] is on with z0[i] ~ N(z0_mean_on,z0_var_on)
With probability 1-prob_on, z0[i] is off with z0[i]=0.
Thus, on average, prob_on*nz0 components are *on*. We set the parameters for the model as well as the SNR for the measurements.
End of explanation
"""
# Generate the random input
z0_on = np.random.normal(z0_mean_on, np.sqrt(z0_var_on), zshape0)
u = np.random.uniform(0, 1, zshape0) < prob_on
z0 = z0_on*u
"""
Explanation: Using these parameters, we can generate random sparse z0 following this distribution with the following simple code.
End of explanation
"""
ind = np.arange(nz0)
plt.plot(ind,z0);
"""
Explanation: To illustrate the sparsity, we plot the vector z0. We can see from this plot that the majority of the components of z0 are zero.
End of explanation
"""
A = np.random.normal(0, 1/np.sqrt(nz0), Ashape)
z1 = A.dot(z0)
"""
Explanation: Now, we create a random transform A and output z1 = A.dot(z0)
End of explanation
"""
zvar1 = np.mean(np.abs(z1)**2)
wvar = zvar1*np.power(10, -0.1*snr)
w = np.random.normal(0,np.sqrt(wvar), zshape1)
y = z1 + w
"""
Explanation: Finally, we add noise at the desired SNR
End of explanation
"""
est0_off = vp.estim.DiscreteEst(0,1,zshape0)
est0_on = vp.estim.GaussEst(z0_mean_on, z0_var_on,zshape0)
"""
Explanation: Setting up the AMP / GAMP Solver
Now that we have created the sparse data, we will use the vampyre package to recover z0 and z1 from y. In vampyre the methods to perform this estimation are called solvers. The basic Approximate Message Passing (AMP) algorithm was developed in:
Donoho, David L., Arian Maleki, and Andrea Montanari. "Message-passing algorithms for compressed sensing." Proceedings of the National Academy of Sciences 106.45 (2009): 18914-18919.
The vampyre package currently implements a slightly more general solver, called Generalized AMP described in:
Rangan, Sundeep. "Generalized approximate message passing for estimation with random linear mixing." Proc. IEEE Internation Symposium on Information Theory (ISIT), 2011.
GAMP can handle nonlinear output channels. In this demo, we will restrict our attention to the linear Gaussian channel, so the GAMP solver essentially implements the AMP algorithm.
Similar to most of the solvers in the vampyre package, the GAMP solver needs precise specifications of the probability distributions of z0, z1 and y. For the linear inverse problem, we will specify three components:
* The prior p(z0);
* The transform A such that z1 = A.dot(z_0)
* The likelihood p(y|z1).
Both the prior and likelihood are described by estimators. The transform is described by an operator.
We first describe the estimator for the prior p(z0). The vampyre package will eventually have a large number of estimators to describe various densities. In this simple demo, p(z0) is what is called a mixture distribution, since each component of z0 is drawn from one distribution with probability 1-prob_on and from a second distribution with probability prob_on. To describe this mixture distribution in the vampyre package, we need to first create estimator classes for each component distribution. To this end, the following code creates two estimators:
* est0_off: The estimator corresponding to the z0[j]=0. This is simply a discrete distribution with a point mass at zero.
* est0_on: The estimator corresponding to the case when z0[j] = N(z0_mean_on, z0_var_on). This is a Gaussian distribution
End of explanation
"""
est_list = [est0_off, est0_on]
pz0 = np.array([1-prob_on, prob_on])
est0 = vp.estim.MixEst(est_list, w=pz0, name='Input')
"""
Explanation: We next use the vampyre class, MixEst, to describe a mixture of the two distributions. This is done by creating a list, est_list, of the estimators and an array pz0 with the probability of each component. The resulting estimator, est0, is the estimator for the prior on z0, which is also the input to the transform A. We give it the name Input since it corresponds to the input, but any naming is fine, or you can let vampyre give it a generic name.
End of explanation
"""
Aop = vp.trans.MatrixLT(A,zshape0)
"""
Explanation: We next define the operator A. In this case the operator is defined by a matrix so we use the MatrixLT class.
End of explanation
"""
est1 = vp.estim.GaussEst(y,wvar,zshape1,name='Output')
"""
Explanation: Finally, we describe the likelihood function, p(y|z1). Since y=z1+w, we can describe this as a Gaussian estimator.
End of explanation
"""
nit = 20 # number of iterations
solver = vp.solver.Gamp(est0,est1,Aop,hist_list=['z0', 'zvar0'],nit=nit)
"""
Explanation: Running the GAMP Solver
Having described the input and output estimators and the variance handler, we can now construct a GAMP solver. The construtor takes the input and output estimators, the variance handler and other parameters. The paramter nit is the number of iterations. This is fixed for now. Later, we will add auto-termination. The other parameter, hist_list is optional, and will be described momentarily.
End of explanation
"""
solver.summary()
"""
Explanation: We can print a summary of the model which indicates the dimensions and the estimators.
End of explanation
"""
solver.solve()
"""
Explanation: We now run the solver by calling the solve() method. For a small problem like this, this should be close to instantaneous.
End of explanation
"""
zhat0 = solver.z0
ind = np.array(range(nz0))
plt.plot(ind,z0)
plt.plot(ind,zhat0)
plt.legend(['True', 'Estimate']);
"""
Explanation: The GAMP solver estimate is the field z0, which we extract as zhat0. We plot it and compare it to the true vector z0. You should see a very good match.
End of explanation
"""
zerr0_act = np.mean(np.abs(zhat0-z0)**2)
zerr0_pred = solver.zvar0
zpow0 = np.mean(np.abs(z0)**2)
mse_act = 10*np.log10(zerr0_act/zpow0)
mse_pred = 10*np.log10(zerr0_pred/zpow0)
print("Normalized MSE (dB): actual {0:f} pred {1:f}".format(mse_act, mse_pred))
"""
Explanation: We can measure the normalized mean squared error as follows. The GAMP solver also produces an estimate of the MSE in the variable zvar0. We can extract this variable to compute the predicted MSE. We see that the normalized MSE is indeed low and closely matches the value predicted by GAMP.
End of explanation
"""
def plot_z0_est(solver,z0):
"""
Plots the true and predicted MSE for the estimates of z0
"""
# Compute the MSE as a function of the iteration
zhat0_hist = solver.hist_dict['z0']
zvar0_hist = solver.hist_dict['zvar0']
nit = len(zhat0_hist)
mse_act = np.zeros(nit)
mse_pred = np.zeros(nit)
for it in range(nit):
zerr0_act = np.mean(np.abs(zhat0_hist[it]-z0)**2)
zerr0_pred = zvar0_hist[it]
mse_act[it] = 10*np.log10(zerr0_act/zpow0)
mse_pred[it] = 10*np.log10(zerr0_pred/zpow0)
plt.plot(range(nit), mse_act, 'o-', linewidth=2)
plt.plot(range(nit), mse_pred, 's', linewidth=1)
plt.xlabel('Iteration')
plt.ylabel('Normalized MSE (dB)')
plt.legend(['Actual', 'Predicted'])
plt.grid()
plot_z0_est(solver,z0)
"""
Explanation: Finally, we can plot the actual and predicted MSE as a function of the iteration number. When the solver was constructed, we passed the argument hist_list=['z0', 'zvar0']. This tells the solver to store the value of the estimate z0 and the predicted error variance zvar0 at each iteration. We can recover these values from solver.hist_dict, the history dictionary. Using these values we can compute and plot the normalized MSE at each iteration. Since we are going to plot several times in this demo, we wrap the plotting routine in a function, plot_z0_est().
When we run plot_z0_est() we see that GAMP gets a low MSE in very few iterations, about 10.
End of explanation
"""
# Generate a random transform
A = vp.trans.rand_rot_invariant_mat(nz1,nz0,cond_num=10)
Aop = vp.trans.MatrixLT(A,zshape0)
z1 = A.dot(z0)
"""
Explanation: Damping and Stability
A significant problem with GAMP is its stability. GAMP and AMP are designed for Gaussian i.i.d. matrices. For other matrices, the algorithms can diverge. This divergence issue is one of the main difficulties in using GAMP and AMP in practice.
Recent research has shown that the convergence appears to be related to condition number of the matrix. Matrices A with higher condition numbers tend to cause GAMP / AMP to diverge. See, for example:
* Rangan, Sundeep, Philip Schniter, and Alyson Fletcher. "On the convergence of approximate message passing with arbitrary matrices." Proc. IEEE International Symposium on Information Theory (ISIT), 2014.
To illustrate we create a random matrix with a specified condition number. This can be done with the rand_rot_invariant command. Specifically, it creates a matrix A=USV.T where U and V are random orthogonal matrices and S has a specified condition number.
End of explanation
"""
# Add noise
zvar1 = np.mean(np.abs(z1)**2)
wvar = zvar1*np.power(10, -0.1*snr)
w = np.random.normal(0,np.sqrt(wvar), zshape1)
y = z1 + w
# Create the estimator
est1 = vp.estim.GaussEst(y,wvar,zshape1,name='Output')
# Run GAMP
nit = 20
solver = vp.solver.Gamp(est0,est1,Aop,hist_list=['z0', 'zvar0'],nit=nit)
solver.solve()
"""
Explanation: Now, we create a synthetic data based on the matrix and re-run GAMP.
End of explanation
"""
plot_z0_est(solver,z0)
"""
Explanation: We plot the results and we can see that the algorithm diverges.
End of explanation
"""
# Run GAMP with damping
nit = 200
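# Rule of thumb: step <= 1/sqrt(cond_num) = 1/sqrt(10) ~ 0.32, hence step=0.3 here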
solver = vp.solver.Gamp(est0,est1,Aop,hist_list=['z0', 'zvar0'],nit=nit,step=0.3)
solver.solve()
# Plot the results
plot_z0_est(solver,z0)
"""
Explanation: To fix the problem, one can apply damping. In damping, the GAMP algorithm is adjusted to take a partial step as controlled by a parameter step between 0 and 1. In general, the theory is that step <= 1/sqrt(cond_num). In practice, you can try different step sizes until you get reasonable results. A warning though: Sometimes you never get great results.
In this case, we take step=0.3. We also need to run the algorithm for many more iterations. We see we get better results although we have to run for more iterations.
End of explanation
"""
|
marcotcr/lime
|
doc/notebooks/Tutorial - images - Pytorch.ipynb
|
bsd-2-clause
|
import matplotlib.pyplot as plt
from PIL import Image
import torch.nn as nn
import numpy as np
import os, json
import torch
from torchvision import models, transforms
from torch.autograd import Variable
import torch.nn.functional as F
"""
Explanation: Using Lime with Pytorch
In this tutorial we will show how to use Lime framework with Pytorch. Specifically, we will use Lime to explain the prediction generated by one of the pretrained ImageNet models.
Let's start with importing our dependencies. This code is tested with Pytorch 1.0 but should work with older versions as well.
End of explanation
"""
def get_image(path):
with open(os.path.abspath(path), 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
img = get_image('./data/dogs.png')
plt.imshow(img)
"""
Explanation: Load our test image and see how it looks.
End of explanation
"""
# resize and take the center part of image to what our model expects
def get_input_transform():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transf = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
return transf
def get_input_tensors(img):
transf = get_input_transform()
    # unsqueeze converts a single image to a batch of 1
return transf(img).unsqueeze(0)
"""
Explanation: We need to convert this image to a Pytorch tensor and also apply the same normalization (whitening) used by our pretrained model.
End of explanation
"""
model = models.inception_v3(pretrained=True)
"""
Explanation: Load a pretrained ImageNet model available in Pytorch (here, Inception v3).
End of explanation
"""
idx2label, cls2label, cls2idx = [], {}, {}
with open(os.path.abspath('./data/imagenet_class_index.json'), 'r') as read_file:
class_idx = json.load(read_file)
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
cls2label = {class_idx[str(k)][0]: class_idx[str(k)][1] for k in range(len(class_idx))}
cls2idx = {class_idx[str(k)][0]: k for k in range(len(class_idx))}
"""
Explanation: Load the label texts for ImageNet predictions so we know what the model is predicting
End of explanation
"""
img_t = get_input_tensors(img)
model.eval()
logits = model(img_t)
"""
Explanation: Get the prediction for our image.
End of explanation
"""
probs = F.softmax(logits, dim=1)
probs5 = probs.topk(5)
tuple((p,c, idx2label[c]) for p, c in zip(probs5[0][0].detach().numpy(), probs5[1][0].detach().numpy()))
"""
Explanation: The predictions we got are logits. Let's pass them through softmax to get probabilities and class labels for the top 5 predictions.
End of explanation
"""
def get_pil_transform():
transf = transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224)
])
return transf
def get_preprocess_transform():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transf = transforms.Compose([
transforms.ToTensor(),
normalize
])
return transf
pill_transf = get_pil_transform()
preprocess_transform = get_preprocess_transform()
"""
Explanation: We are getting ready to use Lime. Lime produces an array of images from the original input image via a perturbation algorithm. So we need to provide two things: (1) the original image as a numpy array, and (2) a classification function that takes an array of perturbed images as input and produces the probabilities for each class for each image as output.
For Pytorch, we first need to define two separate transforms: (1) one that takes a PIL image and resizes and crops it, and (2) one that takes the resized, cropped image and applies whitening.
End of explanation
"""
def batch_predict(images):
model.eval()
batch = torch.stack(tuple(preprocess_transform(i) for i in images), dim=0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
batch = batch.to(device)
logits = model(batch)
probs = F.softmax(logits, dim=1)
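    # detach from the autograd graph, move to CPU, and return a (num_images, num_classes) numpy array as Lime expects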
return probs.detach().cpu().numpy()
"""
Explanation: Now we are ready to define the classification function that Lime needs. The input to this function is a numpy array of images where each image is an ndarray of shape (channel, height, width). The output is a numpy array of shape (image index, classes) where each value is the probability for that image/class combination.
End of explanation
"""
test_pred = batch_predict([pill_transf(img)])
test_pred.squeeze().argmax()
"""
Explanation: Let's test our function for the sample image.
End of explanation
"""
from lime import lime_image
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(np.array(pill_transf(img)),
batch_predict, # classification function
top_labels=5,
hide_color=0,
num_samples=1000) # number of images that will be sent to classification function
"""
Explanation: Import lime and create an explanation for this prediction.
End of explanation
"""
from skimage.segmentation import mark_boundaries
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=False)
img_boundry1 = mark_boundaries(temp/255.0, mask)
plt.imshow(img_boundry1)
"""
Explanation: Let's apply the mask to the image and see the areas that encourage the top prediction.
End of explanation
"""
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)
img_boundry2 = mark_boundaries(temp/255.0, mask)
plt.imshow(img_boundry2)
"""
Explanation: Let's also show the areas that contribute against the top prediction.
End of explanation
"""
|
rastala/mmlspark
|
notebooks/samples/304 - Medical Entity Extraction.ipynb
|
mit
|
from mmlspark import CNTKModel, ModelDownloader
from pyspark.sql.functions import udf, col
from pyspark.sql.types import IntegerType, ArrayType, FloatType, StringType
from pyspark.sql import Row
from os.path import abspath, join
import numpy as np
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import os, tarfile, pickle
import urllib.request
import nltk
"""
Explanation: 304 - Medical Entity Extraction with a BiLSTM
In this tutorial we use a Bidirectional LSTM entity extractor from the MMLSpark
model downloader to extract entities from PubMed medical abstracts.
Our goal is to identify useful entities in a block of free-form text. This is a
nontrivial task because entities might be referenced in the text using a variety of
synonyms, abbreviations, or formats. Our target output for this model is a set
of tags that specify what kind of entity is referenced. The model we use was
trained on a large dataset of publicly tagged PubMed abstracts. An example
annotated sequence is given below, "O" represents no tag:
|I-Chemical | O |I-Chemical | O | O |I-Chemical | O |I-Chemical | O | O | O | O |I-Disease |I-Disease| O | O |
|:---: |:---:|:---: |:---:|:---:|:---: |:---:|:---: |:---:|:---: |:---:|:---:|:---: |:---: |:---:|:---: |
|Baricitinib| , |Methotrexate| , | or |Baricitinib|Plus |Methotrexate| in |Patients|with |Early|Rheumatoid|Arthritis| Who |Had...|
End of explanation
"""
modelName = "BiLSTM"
modelDir = abspath("models")
d = ModelDownloader(spark, "wasb://" + modelDir)
modelSchema = d.downloadByName(modelName)
modelName = "BiLSTM"
modelDir = abspath("models")
d = ModelDownloader(spark, "file://" + modelDir)
modelSchema = d.downloadByName(modelName)
"""
Explanation: Get the model and extract the data.
End of explanation
"""
nltk.download("punkt", download_dir=modelDir)
nltk.data.path.append(modelDir)
wordEmbFileName = "WordEmbeddings_PubMed.pkl"
pickleFile = join(abspath("models"), wordEmbFileName)
if not os.path.isfile(pickleFile):
urllib.request.urlretrieve("https://mmlspark.blob.core.windows.net/datasets/" + wordEmbFileName, pickleFile)
"""
Explanation: Download the embeddings and the tokenizer
We use the nltk punkt sentence and word tokenizers and a set of embeddings trained on PubMed Articles
End of explanation
"""
pickleContent = pickle.load(open(pickleFile, "rb"), encoding="latin-1")
wordToIndex = pickleContent["word_to_index"]
wordvectors = pickleContent["wordvectors"]
classToEntity = pickleContent["class_to_entity"]
nClasses = len(classToEntity)
nFeatures = wordvectors.shape[1]
maxSentenceLen = 613
content = "Baricitinib, Methotrexate, or Baricitinib Plus Methotrexate in Patients with Early Rheumatoid\
Arthritis Who Had Received Limited or No Treatment with Disease-Modifying-Anti-Rheumatic-Drugs (DMARDs):\
Phase 3 Trial Results. Keywords: Janus kinase (JAK), methotrexate (MTX) and rheumatoid arthritis (RA) and\
Clinical research. In 2 completed phase 3 studies, baricitinib (bari) improved disease activity with a\
satisfactory safety profile in patients (pts) with moderately-to-severely active RA who were inadequate\
responders to either conventional synthetic1 or biologic2DMARDs. This abstract reports results from a\
phase 3 study of bari administered as monotherapy or in combination with methotrexate (MTX) to pts with\
early active RA who had limited or no prior treatment with DMARDs. MTX monotherapy was the active comparator."
sentences = sent_tokenize(content)
df = spark.createDataFrame(enumerate(sentences), ["index","sentence"])
# Add the tokenizers to all worker nodes
def prepNLTK(partition):
localPath = abspath("nltk")
nltk.download("punkt", localPath)
nltk.data.path.append(localPath)
return partition
df = df.rdd.mapPartitions(prepNLTK).toDF()
tokenizeUDF = udf(word_tokenize, ArrayType(StringType()))
df = df.withColumn("tokens",tokenizeUDF("sentence"))
countUDF = udf(len, IntegerType())
df = df.withColumn("count",countUDF("tokens"))
def wordToEmb(word):
return wordvectors[wordToIndex.get(word.lower(), wordToIndex["UNK"])]
def featurize(tokens):
X = np.zeros((maxSentenceLen, nFeatures))
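    # left-pad with zero vectors so every sentence becomes a fixed-size (maxSentenceLen, nFeatures) matrix,
    # with the token embeddings occupying the last len(tokens) rows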
X[-len(tokens):,:] = np.array([wordToEmb(word) for word in tokens])
return [float(x) for x in X.reshape(maxSentenceLen, nFeatures).flatten()]
featurizeUDF = udf(featurize, ArrayType(FloatType()))
df = df.withColumn("features", featurizeUDF("tokens"))
df.show()
"""
Explanation: Load the embeddings and create functions for encoding sentences
End of explanation
"""
model = CNTKModel() \
.setModelLocation(spark, modelSchema.uri) \
.setInputCol("features") \
.setOutputCol("probs") \
.setOutputNodeIndex(0) \
.setMiniBatchSize(1)
df = model.transform(df).cache()
df.show()
def probsToEntities(probs, wordCount):
reshaped_probs = np.array(probs).reshape(maxSentenceLen, nClasses)
reshaped_probs = reshaped_probs[-wordCount:,:]
return [classToEntity[np.argmax(probs)] for probs in reshaped_probs]
toEntityUDF = udf(probsToEntities,ArrayType(StringType()))
df = df.withColumn("entities", toEntityUDF("probs", "count"))
df.show()
"""
Explanation: Run the CNTKModel
End of explanation
"""
# Color Code the Text based on the entity type
colors = {
"B-Disease": "blue",
"I-Disease":"blue",
"B-Drug":"lime",
"I-Drug":"lime",
"B-Chemical":"lime",
"I-Chemical":"lime",
"O":"black",
"NONE":"black"
}
def prettyPrint(words, annotations):
formattedWords = []
for word,annotation in zip(words,annotations):
formattedWord = "<font size = '2' color = '{}'>{}</font>".format(colors[annotation], word)
if annotation in {"O","NONE"}:
formattedWords.append(formattedWord)
else:
formattedWords.append("<b>{}</b>".format(formattedWord))
return " ".join(formattedWords)
prettyPrintUDF = udf(prettyPrint, StringType())
df = df.withColumn("formattedSentence", prettyPrintUDF("tokens", "entities")) \
.select("formattedSentence")
sentences = [row["formattedSentence"] for row in df.collect()]
df.registerTempTable("df")
from IPython.core.display import display, HTML
for sentence in sentences:
display(HTML(sentence))
%%sql -q -o df
select * from df
%%local
sentences =df["formattedSentence"]
from IPython.core.display import display, HTML
for sentence in sentences:
display(HTML(sentence))
"""
Explanation: Show the annotated text
End of explanation
"""
|
moustakas/impy
|
projects/desi/lya/18dec19/mock-contaminants-qso.ipynb
|
gpl-2.0
|
import os
from desiutil.log import get_logger
log = get_logger()
import seaborn as sns
rc = {'font.family': 'serif'}#, 'text.usetex': True}
sns.set(style='ticks', font_scale=1.5, palette='Set2', rc=rc)
%matplotlib inline
"""
Explanation: Mock Target Contaminants - QSO Edition
The purpose of this notebook is to illustrate how contaminants are included in QSO (tracer+Lyman-alpha) spectral simulations generated using desitarget/bin/select_mock_targets.
The results below were generated with the tagged versions of the code in the 18.12 software release.
Additional information about simulating QSO targets (including contaminants) can be found on the SimulateQuasarTargets wiki page.
John Moustakas
Siena College
2018 December 19
Preliminaries.
End of explanation
"""
from desitarget.mock.mockmaker import QSOMaker, LYAMaker
QSO = QSOMaker()
data_tracer = QSO.read(only_coords=True, zmax_qso=1.8)
qso_density = QSO.mock_density(QSO.default_mockfile)
"""
Explanation: Target Densities
Recall that we expect with the current target selection algorithms (which will be tested extensively during Survey Validation) to obtain spectra of, on average, 120 tracer QSOs/deg2 (i.e., QSOs at z<2.1), 50 Lya QSOs/deg2 (i.e., QSOs at z>2.1), and 90 contaminants/deg2. Very roughly, approximately two-thirds of these contaminants will be stars and the remainder will be intermediate-redshift galaxies.
However, a more detailed analyis of the Galactic and extragalactic objects expected to contaminate QSO target selection is clearly needed.
Brief recap: how do we generate spectra for (mock) QSO targets?
Start with cosmological mocks which have RA, Dec, and redshift:
Tracer QSOs: DarkSky/v1.0.1
Lya QSOs: london/v4.2.0
The two mocks are stitched together precisely at z=1.8 (including RSD) to avoid double-counting.
Use desisim.templates.SIMQSO to generate (continuum) spectra at the input redshift.
Under the hood, we draw from the BOSS/DR9 QSO luminosity function to get the apparent (normalization) magnitude of each QSO (accounting for the K-correction) and to synthesize (noiseless) grzW1W2 photometry.
We perturb the photometry given the depth of our imaging and iteratively apply the latest target selection criteria (using color-cuts, not random forest) until we achieve the desired target density.
For the Lya QSOs target selection cuts are applied after Lya forest, BALs, etc. are included.
Finally, we write out targets.fits and truth.fits files (and spectra) with all the relevant catalog and ancillary data.
End of explanation
"""
log.info(QSO.default_mockfile)
log.info('Average density = {:.3f} QSO/deg2'.format(qso_density))
QSO.qamock_sky(data_tracer)
LYA = LYAMaker()
mockfile = os.path.join(os.getenv('DESI_ROOT'), 'mocks', 'lya_forest', 'london', 'v4.2.0', 'master.fits')
data_lya = LYA.read(mockfile=mockfile, only_coords=True, zmax_qso=1.8)
lya_density = LYA.mock_density(mockfile)
log.info(mockfile)
log.info('Average density = {:.3f} LYa QSO/deg2'.format(lya_density))
LYA.qamock_sky(data_lya)
"""
Explanation: Note that in general all the cosmological mocks are oversampled, so we have to subsample by a constant fraction in order to preserve the large-scale structure signal.
For example, we downsample the DarkSky/QSO mock by an average factor of 0.35 from 340/deg2 to 120/deg2.
End of explanation
"""
from IPython.display import Image, HTML, display
Image('histo-QSO.png')
"""
Explanation: Contaminants: a wish list.
To properly include contaminants into our spectral simulations we need (for the kinds of objects that pass DESI/QSO target selection criteria):
The correct redshift distribution (for extragalactic contaminants).
A sensible spatial distribution (should not be random, since stellar contamination will vary with Galactic latitude).
An optical luminosity function or, at the very least, a roughly correct apparent magnitude-redshift distribution.
Spectral templates (so the correct multiband colors can be synthesized).
Fulfilling this wish-list will require non-negligible dedicated effort, with input from the Lya and Target Selection Working Groups. Alternatively, Survey Validation should provide all the necessary observations.
Contaminants: current status.
Briefly, in the current version of select_mock_targets we use the following inputs.
Extragalactic contaminants:
Use the Buzzard/v1.6 mock (flux-limited to roughly r=24) for spatial coordinates and redshifts.
Use the desisim/BGS templates (see, e.g., this notebook), as representative of the full range of spectral shapes spanned by galaxies. (But note: these templates were trained on spectra of galaxies only down to I<20.5.)
For speed, pre-select the templates (at a given redshift) that will pass QSO color-cuts and drawn from this subset of templates with uniform probability.
Normalize each spectrum in the r-band using the QSO apparent magnitude distribution measured in DR7.1 (see this notebook). (Note: doing this ignores the correlation between redshift and apparent magnitude.)
Galactic / stellar contaminants:
Use the MWS/v0.0.6 and MWS-Superfaint/v0.0.6 mocks (flux-limited to r=23.5) to get spatial coordinates and radial velocities.
Use the desisim/STAR templates (see this notebook).
As for extragalactic contaminants, we pre-select the stellar templates that will pass QSO color-cuts and normalize to the appropriate QSO r-band magnitude distribution.
Preliminary results.
Some preliminary results can be viewed by navigating to targets-qa/QSO.html, which is based on spectral simulations (of all target classes, including contaminants) of 240k targets spanning 10 DESI tiles (roughly 40 deg2) generated as part of the 18.12 software release.
Target density
The final target density is close to the nominal density (OK given variations in large-scale structure), but would be nice to confirm over a larger footprint.
End of explanation
"""
display(HTML("<table><tr><td><img src='mock-nz-QSO.png'></td><td><img src='mock-zvmag-QSO.png'></td></tr></table>"))
"""
Explanation: Redshift distribution and apparent magnitude vs redshift relation.
Overall the final redshift distribution is not terrible, given the simple assumptions used.
Not clear whether the "expected" dn/dz in desimodel/data/targets/nz_QSO.dat includes contaminants or not.
Need to test whether the redshift distribution of the extragalactic contaminants is sensible.
Too few Lya QSOs in the simulations? Or perhaps an issue with nz_QSO.dat?
Need to check that the apparent magnitude-redshift relation matches data.
End of explanation
"""
|
kubeflow/kfp-tekton-backend
|
samples/core/dsl_static_type_checking/dsl_static_type_checking.ipynb
|
apache-2.0
|
!python3 -m pip install 'kfp>=0.1.31' --quiet
"""
Explanation: KubeFlow Pipeline DSL Static Type Checking
In this notebook, we will demo:
Defining a KubeFlow pipeline with Python DSL
Compiling the pipeline with type checking
Static type checking helps users identify component I/O inconsistencies without running the pipeline. It also shortens development cycles by catching errors early. This feature is especially useful in two cases: 1) when the pipeline is huge and manually checking the types is infeasible; 2) when some components are shared and the type information is not immediately available to the pipeline authors.
Since this sample focuses on DSL type checking, we will use components that are not runnable in the system but that cover various type checking scenarios.
Component definition
Components can be defined in either YAML or functions decorated by dsl.component.
Type definition
Types can be defined as string or a dictionary with the openapi_schema_validator property formatted as:
yaml
{
type_name: {
openapi_schema_validator: {
}
}
}
For example, the following yaml declares a GCSPath type with the openapi_schema_validator for output field_m.
The type could also be a plain string, such as the GcsUri. The type name could be either one of the core types or customized ones.
yaml
name: component a
description: component a desc
inputs:
- {name: field_l, type: Integer}
outputs:
- {name: field_m, type: {GCSPath: {openapi_schema_validator: {type: string, pattern: "^gs://.*$" } }}}
- {name: field_n, type: customized_type}
- {name: field_o, type: GcsUri}
implementation:
container:
image: gcr.io/ml-pipeline/component-a
command: [python3, /pipelines/component/src/train.py]
args: [
--field-l, {inputValue: field_l},
]
fileOutputs:
field_m: /schema.txt
field_n: /feature.txt
field_o: /output.txt
If you define the component using the function decorator, there are a list of core types.
For example, the following component declares a core type Integer for input field_l while
declares customized_type for its output field_n.
python
@component
def task_factory_a(field_l: Integer()) -> {'field_m': {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}},
'field_n': 'customized_type',
'field_o': 'Integer'
}:
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-a',
arguments = [
'--field-l', field_l,
],
file_outputs = {
'field_m': '/schema.txt',
'field_n': '/feature.txt',
'field_o': '/output.txt'
}
)
Type check switch
Type checking is enabled by default. It can be disabled by passing the --disable-type-check argument when dsl-compile is run from the command line, or by calling dsl.compiler.Compiler().compile(type_check=False).
If one wants to ignore the type of a single parameter, call the ignore_type() function on the corresponding PipelineParam.
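For example, a sketch (using hypothetical task factories a and b analogous to the ones used later in this notebook) of ignoring the type of one upstream output before passing it downstream:
python
b = task_factory_b(field_x=a.outputs['field_n'],
                   field_y=a.outputs['field_o'].ignore_type(),
                   field_z=a.outputs['field_m'])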
How does type checking work?
The DSL compiler checks type consistency among components by comparing the type_name as well as the openapi_schema_validator. Some special cases are listed here:
1. Type checking succeeds if the upstream/downstream components lack type information.
2. Type checking succeeds if type checking is disabled.
3. Type checking succeeds if the parameter type is ignored.
Setup
Install Pipeline SDK
End of explanation
"""
# In yaml, one can optionally add the type information to both inputs and outputs.
# There are two ways to define the types: string or a dictionary with the openapi_schema_validator property.
# The openapi_schema_validator is a json schema object that describes schema of the parameter value.
component_a = '''\
name: component a
description: component a desc
inputs:
- {name: field_l, type: Integer}
outputs:
- {name: field_m, type: {GCSPath: {openapi_schema_validator: {type: string, pattern: "^gs://.*$" } }}}
- {name: field_n, type: customized_type}
- {name: field_o, type: GcsUri}
implementation:
container:
image: gcr.io/ml-pipeline/component-a
command: [python3, /pipelines/component/src/train.py]
args: [
--field-l, {inputValue: field_l},
]
fileOutputs:
field_m: /schema.txt
field_n: /feature.txt
field_o: /output.txt
'''
component_b = '''\
name: component b
description: component b desc
inputs:
- {name: field_x, type: customized_type}
- {name: field_y, type: GcsUri}
- {name: field_z, type: {GCSPath: {openapi_schema_validator: {type: string, pattern: "^gs://.*$" } }}}
outputs:
- {name: output_model_uri, type: GcsUri}
implementation:
container:
image: gcr.io/ml-pipeline/component-a
command: [python3]
args: [
--field-x, {inputValue: field_x},
--field-y, {inputValue: field_y},
--field-z, {inputValue: field_z},
]
fileOutputs:
output_model_uri: /schema.txt
'''
"""
Explanation: Type Check with YAML components: successful scenario
Author components in YAML
End of explanation
"""
import kfp.components as comp
import kfp.dsl as dsl
import kfp.compiler as compiler
# The components are loaded as task factories that generate container_ops.
task_factory_a = comp.load_component_from_text(text=component_a)
task_factory_b = comp.load_component_from_text(text=component_b)
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_a',
description='')
def pipeline_a():
a = task_factory_a(field_l=12)
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'])
compiler.Compiler().compile(pipeline_a, 'pipeline_a.zip', type_check=True)
"""
Explanation: Author a pipeline with the above components
End of explanation
"""
# In this case, the component_a contains an output field_o as GcrUri
# but the component_b requires an input field_y as GcsUri
component_a = '''\
name: component a
description: component a desc
inputs:
- {name: field_l, type: Integer}
outputs:
- {name: field_m, type: {GCSPath: {openapi_schema_validator: {type: string, pattern: "^gs://.*$" } }}}
- {name: field_n, type: customized_type}
- {name: field_o, type: GcrUri}
implementation:
container:
image: gcr.io/ml-pipeline/component-a
command: [python3, /pipelines/component/src/train.py]
args: [
--field-l, {inputValue: field_l},
]
fileOutputs:
field_m: /schema.txt
field_n: /feature.txt
field_o: /output.txt
'''
component_b = '''\
name: component b
description: component b desc
inputs:
- {name: field_x, type: customized_type}
- {name: field_y, type: GcsUri}
- {name: field_z, type: {GCSPath: {openapi_schema_validator: {type: string, pattern: "^gs://.*$" } }}}
outputs:
- {name: output_model_uri, type: GcsUri}
implementation:
container:
image: gcr.io/ml-pipeline/component-a
command: [python3]
args: [
--field-x, {inputValue: field_x},
--field-y, {inputValue: field_y},
--field-z, {inputValue: field_z},
]
fileOutputs:
output_model_uri: /schema.txt
'''
"""
Explanation: Type Check with YAML components: failed scenario
Author components in YAML
End of explanation
"""
import kfp.components as comp
import kfp.dsl as dsl
import kfp.compiler as compiler
from kfp.dsl.types import InconsistentTypeException
task_factory_a = comp.load_component_from_text(text=component_a)
task_factory_b = comp.load_component_from_text(text=component_b)
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_b',
description='')
def pipeline_b():
a = task_factory_a(field_l=12)
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'])
try:
compiler.Compiler().compile(pipeline_b, 'pipeline_b.zip', type_check=True)
except InconsistentTypeException as e:
print(e)
"""
Explanation: Author a pipeline with the above components
End of explanation
"""
# Disable the type_check
compiler.Compiler().compile(pipeline_b, 'pipeline_b.zip', type_check=False)
"""
Explanation: Author a pipeline with the above components but type checking disabled.
End of explanation
"""
from kfp.dsl import component
from kfp.dsl.types import Integer, GCSPath
from kfp.dsl import ContainerOp
# when components are defined based on the component decorator,
# the type information is annotated to the input or function returns.
# There are two ways to define the type: string or a dictionary with the openapi_schema_validator property
@component
def task_factory_a(field_l: Integer()) -> {'field_m': {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}},
'field_n': 'customized_type',
'field_o': 'Integer'
}:
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-a',
arguments = [
'--field-l', field_l,
],
file_outputs = {
'field_m': '/schema.txt',
'field_n': '/feature.txt',
'field_o': '/output.txt'
}
)
# Users can also use the core types that are pre-defined in the SDK.
# For a full list of core types, check out: https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/dsl/types.py
@component
def task_factory_b(field_x: 'customized_type',
field_y: Integer(),
field_z: {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}}) -> {'output_model_uri': 'GcsUri'}:
return ContainerOp(
name = 'operator b',
image = 'gcr.io/ml-pipeline/component-a',
command = [
'python3',
field_x,
],
arguments = [
'--field-y', field_y,
'--field-z', field_z,
],
file_outputs = {
'output_model_uri': '/schema.txt',
}
)
"""
Explanation: Type Check with decorated components: successful scenario
Author components with decorator
End of explanation
"""
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_c',
description='')
def pipeline_c():
a = task_factory_a(field_l=12)
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'])
compiler.Compiler().compile(pipeline_c, 'pipeline_c.zip', type_check=True)
"""
Explanation: Author a pipeline with the above components
End of explanation
"""
from kfp.dsl import component
from kfp.dsl.types import Integer, GCSPath
from kfp.dsl import ContainerOp
# task_factory_a produces an output field_m whose openapi_schema_validator differs
# from that of task_factory_b's input field_z.
# One is gs:// and the other is gcs://
@component
def task_factory_a(field_l: Integer()) -> {'field_m': {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}},
'field_n': 'customized_type',
'field_o': 'Integer'
}:
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-a',
arguments = [
'--field-l', field_l,
],
file_outputs = {
'field_m': '/schema.txt',
'field_n': '/feature.txt',
'field_o': '/output.txt'
}
)
@component
def task_factory_b(field_x: 'customized_type',
field_y: Integer(),
field_z: {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gcs://.*$"}'}}) -> {'output_model_uri': 'GcsUri'}:
return ContainerOp(
name = 'operator b',
image = 'gcr.io/ml-pipeline/component-a',
command = [
'python3',
field_x,
],
arguments = [
'--field-y', field_y,
'--field-z', field_z,
],
file_outputs = {
'output_model_uri': '/schema.txt',
}
)
"""
Explanation: Type Check with decorated components: failure scenario
Author components with decorator
End of explanation
"""
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_d',
description='')
def pipeline_d():
a = task_factory_a(field_l=12)
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'])
try:
compiler.Compiler().compile(pipeline_d, 'pipeline_d.zip', type_check=True)
except InconsistentTypeException as e:
print(e)
"""
Explanation: Author a pipeline with the above components
End of explanation
"""
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_d',
description='')
def pipeline_d():
a = task_factory_a(field_l=12)
# For each of the arguments, authors can also ignore the types by calling ignore_type function.
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'].ignore_type())
compiler.Compiler().compile(pipeline_d, 'pipeline_d.zip', type_check=True)
"""
Explanation: Author a pipeline with the above components but ignoring types.
End of explanation
"""
from kfp.dsl import component
from kfp.dsl.types import Integer, GCSPath
from kfp.dsl import ContainerOp
# task_factory_a lacks the type information for output field_n
# task_factory_b lacks the type information for input field_y
# When no type information is provided, it matches all types.
@component
def task_factory_a(field_l: Integer()) -> {'field_m': {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}},
'field_o': 'Integer'
}:
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-a',
arguments = [
'--field-l', field_l,
],
file_outputs = {
'field_m': '/schema.txt',
'field_n': '/feature.txt',
'field_o': '/output.txt'
}
)
@component
def task_factory_b(field_x: 'customized_type',
field_y,
field_z: {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}}) -> {'output_model_uri': 'GcsUri'}:
return ContainerOp(
name = 'operator b',
image = 'gcr.io/ml-pipeline/component-a',
command = [
'python3',
field_x,
],
arguments = [
'--field-y', field_y,
'--field-z', field_z,
],
file_outputs = {
'output_model_uri': '/schema.txt',
}
)
"""
Explanation: Type Check with missing type information
Author components(with missing types)
End of explanation
"""
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_e',
description='')
def pipeline_e():
a = task_factory_a(field_l=12)
b = task_factory_b(field_x=a.outputs['field_n'], field_y=a.outputs['field_o'], field_z=a.outputs['field_m'])
compiler.Compiler().compile(pipeline_e, 'pipeline_e.zip', type_check=True)
"""
Explanation: Author a pipeline with the above components
End of explanation
"""
#Use the component as part of the pipeline
@dsl.pipeline(name='type_check_f',
description='')
def pipeline_f():
a = task_factory_a(field_l=12)
b = task_factory_b(a.outputs['field_n'], a.outputs['field_o'], field_z=a.outputs['field_m'])
compiler.Compiler().compile(pipeline_f, 'pipeline_f.zip', type_check=True)
"""
Explanation: Type Check with both named arguments and positional arguments
End of explanation
"""
@component
def task_factory_a(field_m: {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}}, field_o: 'Integer'):
return ContainerOp(
name = 'operator a',
image = 'gcr.io/ml-pipeline/component-b',
arguments = [
'--field-l', field_m,
'--field-o', field_o,
],
)
# Pipeline input types are also checked against the component I/O types.
@dsl.pipeline(name='type_check_g',
description='')
def pipeline_g(a: {'GCSPath': {'openapi_schema_validator': '{"type": "string", "pattern": "^gs://.*$"}'}}='gs://kfp-path', b: Integer()=12):
task_factory_a(field_m=a, field_o=b)
try:
compiler.Compiler().compile(pipeline_g, 'pipeline_g.zip', type_check=True)
except InconsistentTypeException as e:
print(e)
"""
Explanation: Type Check between pipeline parameters and component parameters
End of explanation
"""
from pathlib import Path
for p in Path(".").glob("pipeline_[a-g].zip"):
p.unlink()
"""
Explanation: Clean up
End of explanation
"""
|
ealogar/curso-python
|
basic/4_Functions_classes_and_modules.ipynb
|
apache-2.0
|
def spam(): # Functions are declared with the 'def' keyword, its name, parentheses and a colon
print "spam" # Remember to use indentation!
spam() # Functions are executed with its name followed by parentheses
"""
Explanation: Functions
Let's declare a function
End of explanation
"""
def eggs(arg1): # Functions arguments are declared inside the parentheses
print "eggs", arg1
eggs("eggssss") # Function calls specify arguments inside parentheses
def func(arg1, arg2, arg3): # There is no limit of arguments
print "func", arg1, arg2, arg3
func("spam", "eggs", "fooo")
print func("spam", "eggs", "fooo") # By default functions return None
def my_sum(arg1, arg2):
return arg1 + arg2 # Use the return keyword to output any result
print my_sum(3, 5)
print my_sum(3.333, 5)
print my_sum("spam", "eggs") # Given that Python is a dynamic language we can reuse the same method
"""
Explanation: Let's declare a function with arguments
End of explanation
"""
def my_pow(arg1, arg2=2): # It is possible to define default values for the arguments, always after arguments without default values
return arg1 ** arg2
print my_pow(3)
def my_func(arg1, arg2=2, arg3=3, arg4=4):
return arg1 ** arg2 + arg3 ** arg4
print my_func(3, arg3=2) # Use keyword arguments to skip some of the arguments with default values
"""
Explanation: Let's declare a function with arguments and default values
End of explanation
"""
def my_func(arg1=1, arg2=2, *args): # This arbitrary 'args' list is a (kind-of) tuple of extra positional arguments
print args
return arg1 + arg2
print my_func(2, 3)
print my_func(2, 3, 5, 7)
spam = (5, 7)
print my_func(2, 3, *spam) # It is possible to unpack a tuple or list as an arbitrary list of arguments
"""
Explanation: Let's use an arbitrary arguments list
End of explanation
"""
def my_func(arg1=1, arg2=2, **kwargs): # This arbitrary 'kwargs' argument is a (kind-of) dict of extra keyword arguments
print kwargs
return arg1 + arg2
print my_func(2, 3)
print my_func(2, 3, param3=5, param4=7)
spam = {"param3": 5, "param4": 7}
print my_func(2, 3, **spam) # It is possible to unpack a dict as an arbitrary set of keyword arguments
"""
Explanation: The same applies for arbitrary keyword arguments
End of explanation
"""
def function_caller(f):
f()
def func_as_arg():
print 'There should be one-- and preferably only one --obvious way to do it.'
function_caller(func_as_arg) # Functions can be passed as arguments
"""
Explanation: Functions are first-class objects
End of explanation
"""
class Spam: # 'class' keyword, camel case class name and colon :
pass
spammer = Spam() # Class instantiation: spammer becomes an instance of Spam
print spammer
class Eggs(Spam): # Ancestor superclasses inside parentheses for inheritance
a_class_attr = "class_val" # Class attributes inside the body, outside class methods. Must have value
def __init__(self, attr_val): # __init__ is called at instance initialization (it is not a constructor)
self.attr = attr_val
def method(self, arg1, arg2=None): # Method declaration. Indented and receiving self (the instance)
print "'method' of", self
print self.attr, arg1, arg2 # Access instance attributes using self with a dot .
def second_method(self):
self.attr = 99.99
self.method("FROM 2nd") # Methos may call other methods using self with a dot .
"""
Explanation: REMEMBER:
Functions are declared with the 'def' keyword, its name, parentheses and a colon
Specify arguments inside the parentheses
Define arguments' default values with an equals sign, after arguments without default values
Specify arbitrary arguments or keyword arguments with *args or **kwargs (a small sketch follows this list)
Actually only the asterisks matter, the name is up to you
Use indentation for the body of the function, typically 4 spaces per level
Functions are executed with their name followed by parentheses
Provide input arguments inside the parentheses
Provide keyword arguments by specifying their names
Functions can be declared and called outside classes
Functions are first-class objects
You can pass them as arguments
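A small sketch pulling these pieces together (the names below are invented for illustration):
python
def report(title, sep="-", *args, **kwargs):
    print title
    print sep * len(title)
    for value in args:          # extra positional arguments
        print value
    for key in sorted(kwargs):  # extra keyword arguments
        print key, "=", kwargs[key]

report("Totals", "=", 1, 2, 3, units="kg", year=2014)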
Classes
Let's see how to declare custom classes
End of explanation
"""
egger = Eggs(12.345) # Provide __init__ arguments in the instantiation
print egger
print egger.attr # Retrieve instance attributes with a dot
print egger.a_class_attr # Retrieve class attributes with a dot
print Eggs.a_class_attr
egger.a_class_attr = "new value"
print egger.a_class_attr
print Eggs.a_class_attr
"""
Explanation: Still easy?
End of explanation
"""
print Eggs
"""
Explanation: Class attributes can be retrieved directly from the class
Instances only modify the class attribute's value locally (the class attribute itself is unchanged)
End of explanation
"""
egger.method("value1", "value2")
egger.second_method()
print egger.method
print Eggs.method
inst_method = egger.method
inst_method("valueA", "valueB")
"""
Explanation: Classes are objects too:
Python evaluates the class declaration and instantiates a special object (the class object)
This class object is called each time a new instance is created
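A small sketch reusing the Eggs class defined above (the extra names here are invented for illustration):
python
AlsoEggs = Eggs            # the class object can be bound to another name
egger2 = AlsoEggs(1.5)     # calling the class object creates a new instance
print AlsoEggs is Eggs     # True: both names refer to the same class object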
End of explanation
"""
class Spam:
def spam_method(self):
print self.__class__ # __class__ is a special attribute containing the class of any object
print type(self)
spammer = Spam()
spammer.spam_method()
print spammer
print type(spammer)
# Why does type() say it is an 'instance' and not a 'Spam'?
class Spam(object): # Inherit from 'object'
def spam_method(self):
print self.__class__
print type(self)
spammer = Spam()
print spammer
print type(spammer) # This is a new-style class
"""
Explanation: Methods are also attributes (bound) of classes and instances
Time to talk about new-style classes
End of explanation
"""
class OldStyleClass():
pass
old_inst = OldStyleClass()
print type(old_inst)
# Let's inherit from an old-style class
class NewStyleSubClass(OldStyleClass, object): # Multiple inheritance
pass
new_inst = NewStyleSubClass()
print type(new_inst)
"""
Explanation: New-style classes were introduced in Python 2.2 to unify classes and types
Provide unified object model with a full meta-model (more in the Advanced block)
Other benefits: subclass most built-in types, descriptors (slots, properties, static and class methods)...
By default all classes are old-style until Python 3
In Python 2 you have to inherit from 'object' to use new-style
You must avoid old-style
So you must inherit ALWAYS from 'object'
Other changes introduced in Python 2.2: __new__, new dir() behavior, metaclasses, new MRO (also in 2.3)
More info: http://www.python.org/doc/newstyle/
End of explanation
"""
class Spam(object):
spam_class_attr = "spam" # Class attributes must have value always (you may use None...)
def spam_method(self):
print "spam_method", self, self.spam_class_attr
print self.__class__
class Eggs(object):
eggs_class_attr = "eggs"
def eggs_method(self):
print "eggs_method", self, self.eggs_class_attr
print self.__class__
class Fooo(Spam, Eggs): # Specify a list of ancestor superclasses
fooo_class_attr = "fooo"
def fooo_method(self):
self.spam_method()
self.eggs_method() # Retrieve superclasses attributes as if they were yours
print "fooo_method", self, self.fooo_class_attr
print self.__class__
foooer = Fooo()
foooer.fooo_method()
foooer.spam_method()
foooer.eggs_method() # self is ALWAYS an instance of the subclass
print foooer.spam_class_attr
print foooer.eggs_class_attr
print foooer.fooo_class_attr # We have access to all own and ancestors' attributes
# Given that Python is a dynamic language...
class Spam(object):
pass
spammer = Spam()
spammer.name = "John"
spammer.surname = "Doe"
spammer.age = 65
spammer.male = True # ... this is legal
print spammer.name
print spammer.surname
print spammer.age
print spammer.male
"""
Explanation: Inherit from both old-style classes and 'object' to obtain new-style classes
Let's play a bit with inheritance
End of explanation
"""
class Spam(object):
def method(self, arg=None):
print "Called 'method' with", self, arg
@classmethod # This is a decorator
def cls_method(cls, arg=None):
print "Called 'cls_method' with", cls, arg
@staticmethod # This is another decorator
def st_method(arg=None):
print "Called 'st_method' with", arg
spammer = Spam()
spammer.method(10)
Spam.method(spammer, 100) # Although it works, this is not exactly the same
print spammer.method
print Spam.method # It is unbounded, not related with an instance
spammer.cls_method(20)
Spam.cls_method(200)
print spammer.cls_method
print Spam.cls_method # Both are a bounded method... to the class
spammer.st_method(30)
Spam.st_method(300)
print spammer.st_method
print Spam.st_method # Both are a plain standard functions
"""
Explanation: What about static or class methods?
End of explanation
"""
print "'__name__' value:", __name__
"""
Explanation: REMEMBER:
Classes are declared with the 'class' keyword, its name in camel case and a colon
Specify ancestors superclasses list between parrentheses after the class name
So you must inherit ALWAYS from 'object' to have new-style classes
Use indentation for class body declarations (attributes and methods)
Specify class attributes (with value) inside the class, outside any method
Specify methods inside the body, with indentation (method body has 2x indentation)
Method's first parameter is always self, the instance whose method is being called
Use self to access attributes and other methods of the instance
When inheriting, ancestors attributes and methods can be accessed transparently
There are no private attributes in Python
There is a convention to use underscore _ prefix
Classes definition is not closed. At any time you can add (or delete) an attribute
classmethod to specify class methods; bound to the class, not its instances
Used to implement alternative constructors (e.g. dict.fromkeys); see the sketch after this list
staticmethod to specify static methods; standard functions declared inside the class
Only for organisation, it is equivalent to declaring the function in the class's module
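A brief sketch of both decorators (the Point class below is invented for illustration):
python
class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y
    @classmethod
    def from_tuple(cls, pair):       # alternative constructor, bound to the class
        return cls(pair[0], pair[1])
    @staticmethod
    def distance(x1, y1, x2, y2):    # plain function kept inside the class for organisation
        return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5

p = Point.from_tuple((3, 4))
print p.x, p.y, Point.distance(0, 0, p.x, p.y)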
Modules
What is a Python module?
A module is a file containing Python definitions and statements.
The Python interpreter reads the file and evaluates its definitions and statements.
Python does not accept dashes (-) in module names
End of explanation
"""
def func():
print "Called func in", __name__
print "'func.__module__' value:", func.__module__
!cat my_modules.py
!python my_modules.py
import my_modules
"""
Explanation: The file name is the module name with the suffix .py appended.
The global variable '__name__' contains the name of the current module.
Functions and classes also have a '__module__' attribute containing their module name
End of explanation
"""
# What will happen if we import the module again?
import my_modules
### All code is evaluated (executed) only once the first time it is imported
func()
my_modules.func()
from my_modules import func
func()
func()
!rm -rf basic_tmp
!mkdir basic_tmp
!echo 'print "This is the __init__.py", __name__\n' > basic_tmp/__init__.py
!cp my_modules.py basic_tmp
!python -c "import basic_tmp.my_modules"
"""
Explanation: The module name depends on how the module is being evaluated (imported or executed)
Use if __name__ == "__main__": to detect whether a module (script) is imported or executed
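A minimal sketch of what a module like my_modules.py could contain (the body shown here is illustrative, not the actual file):
python
def func():
    print "Called func in", __name__

if __name__ == "__main__":
    # Runs only when the file is executed as a script, not when it is imported
    func()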
End of explanation
"""
!python -c "from basic_tmp.my_modules import func;func();print my_modules"
!python -c "from basic_tmp.my_modules import func as the_module;the_module();print the_module.__name__"
"""
Explanation: Packages are folders with an __init__.py file
This __init__.py is also evaluated, so it may contain code
The __init__.py is actually the package (check its module name)
The module name depends on the package's path
End of explanation
"""
!rm -rf basic_tmp
!mkdir basic_tmp
!echo 'print "This is the __init__.py", __name__\n' > basic_tmp/__init__.py
!cp my_modules.py basic_tmp
!echo 'from my_modules import func\n' > basic_tmp/__init__.py
!python -c "from basic_tmp import func;func()"
"""
Explanation: LESSONS LEARNT:
- Modules are objects too, and their variables, functions and classes are their attributes
- Modules can be imported in different ways:
- import packages.path.to.module.module_name
- from packages.path.to.module import module_name_1, module_name_2
- from packages.path.to.module import (module_name_1, module_name_2,
module_name_3, module_name_4)
- from packages.path.to.module import module_name as new_module_name
- You are binding the module to another name, like you do with lists or strings
- Modules are independent of how you call (bind) them when importing
End of explanation
"""
|
seap-udea/interstellar
|
Figures.ipynb
|
gpl-3.0
|
#Constants
AU=1.496e8  # km (astronomical unit)
LY=9.4608e12
data=np.loadtxt("cloud-nomult.data")
datan=np.loadtxt("cloud-many.data")
data=np.loadtxt("cloud-many.data")
data=np.loadtxt("cloud.data")
#Elements
qs=data[1:,51]
es=data[1:,52]
if verbose:print("Means: q:",qs.mean()/AU,", e:",es.mean())
if verbose:print("Dispersion: q:",qs.std()/AU,", e:",es.std())
fig=plt.figure()
ax=fig.gca()
ax.plot(qs/AU,es,'ko',ms=1)
#Terminal J2000 coordinates
RAs=data[:,21]
DECs=data[:,22]
if verbose:print("Means: RA:",15*RAs.mean(),", DEC:",DECs.mean())
if verbose:print("Dispersion: q:",15*RAs.std(),", e:",DECs.std())
fig=plt.figure()
ax=fig.gca()
ax.plot(15*RAs,DECs,'ko',ms=1)
ax.set_xlabel('RA (deg)')
ax.set_ylabel('DEC (deg)')
#Future J2000 coordinates
RAs=data[:,46]
DECs=data[:,47]
fig=plt.figure()
ax=fig.gca()
ax.plot(RAs,DECs,'ko',ms=1)
#Terminal positions
xts=data[:,3]
yts=data[:,4]
zts=data[:,5]
rts=np.sqrt(xts**2+yts**2+zts**2)
#Dispersion
if verbose:print("Dispersion in AU:",xts.std()/AU,yts.std()/AU,zts.std()/AU)
disp=zts.std()/rts.mean()
if verbose:print("Percentual dispersion:",disp)
fig=plt.figure()
ax=fig.gca()
ax.plot(xts,yts,'ko',ms=1)
#Future positions
xfs=data[:,34]
yfs=data[:,35]
zfs=data[:,36]
rfs=np.sqrt(xfs**2+yfs**2+zfs**2)
#Dispersion
if verbose:print("Dispersion in ly:",xfs.std()/LY,yfs.std()/LY,zfs.std()/LY)
disp=zfs.std()/rfs.mean()
if verbose:print("Percentual dispersion:",disp)
fig=plt.figure()
ax=fig.gca()
ax.plot(xfs,yfs,'ko',ms=1)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
rt=rts.mean()
xns=xts/rt
yns=yts/rt
zns=zts/rt
ax.plot(xns,yns,zns,'ko',ms=1)
#Elements
qs=data[:,51]/AU
es=data[:,52]
ies=data[:,53]
Ws=data[:,54]
fig=plt.figure()
ax=fig.gca()
ax.plot(es,Ws,'ko',ms=1)
#Covariance matrix
means=[1.197188708990351,.2544567273446408,2458005.972892582579,24.60295925659798,241.5429238264925,122.604016492383]
covariance=[
[2.858709169167452E-6,1.098820139532213E-6,2.140740994127999E-5,-4.629574614074441E-6,.0001980724106465366,.0001029927307342494],
[1.098820139532213E-6,4.223650568138116E-7,8.228257068002674E-6,-1.779505280075431E-6,7.613474148207401E-5,3.958804146112113E-5],
[2.140740994127999E-5,8.228257068002674E-6,.0001603227078400257,-3.466805081263181E-5,.001483242149313819,.0007712542878842905],
[-4.629574614074441E-6,-1.779505280075431E-6,-3.466805081263181E-5,7.497524956627533E-6,-.0003207714690047103,-.000166792920108298],
[.0001980724106465366,7.613474148207401E-5,.001483242149313819,-.0003207714690047103,.01372394843890766,.007136100317461406],
[.0001029927307342494,3.958804146112113E-5,.0007712542878842905,-.000166792920108298,.007136100317461406,.003710639376110803],
]
covariance=np.array(covariance)
dcovariance=np.zeros_like(covariance)
for i in range(6):
dcovariance[i,i]=covariance[i,i]
values=np.random.multivariate_normal(means,dcovariance,1000)
if verbose:print(values.shape)
fig=plt.figure()
ax=fig.gca()
ax.plot(values[:,0],values[:,1],'ko',ms=1)
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
ax.plot(values[:,0],values[:,1],values[:,2],'ko',ms=1)
"""
Explanation: Data map:
0:i
1:tdb (terminal)
2:tdb (future)
3-8:Position Ecliptic J2000
9-14:Position J2000
15-20:Position Galactic J2000
21:RA(h) (terminal)
22:DEC(deg)
23:l(deg)
24:b(deg)
25:d(AU)
26-33:Asymptotic elements, q,e,i,W,w,Mo,to,mu
34-39:Future Position Ecliptic J2000
40-45:Future Position Galactic
46:RA(h) (future)
47:DEC(deg)
48:l(deg)
49:b(deg)
50:d(pc)
51-58:Initial elements, q,e,i,W,w,Mo,to,mu
End of explanation
"""
C=3E5 #km/s
YEAR=365.25*86400 #s
LY=C*YEAR #km
PARSEC=3.2616*LY #km
if verbose:print("1 parsec = %e km"%PARSEC)
#Units
PPY=PARSEC/YEAR
if verbose:print("1 pc/year = %e km/s"%PPY)
#Definitions of lines
p1=np.array([0,0,0]);d1=np.array([0,0,1])
p2=np.array([1,0,0]);d2=np.array([1,0,1])
#Actual data
#Our object:
p1=np.array([5.78971383586522936e+00,1.14501432490714183e+01,3.89758448167880145e+00])
d1=np.array([-1.13203438793190063e+01,-2.23879147871595947e+01,-7.62075215522603067e+00])/PPY
#Star data ()
p2=np.array([-1.39195331002988240e+02, 1.26930155523994657e+01, -1.54372105428406201e+02])
d2=np.array([-10.23491, -16.60858, -34.30936])/PPY
#Closest star
p2=np.array([107.91891000000001, -428.38295, -234.92487000000003])
d2=np.array([-1.00043, -5.59274, -2.40552])/PPY
#Similar
p2=np.array([147.671, 79.1533, -178.551])
d2=np.array([-9.56911, -19.8781, -7.687880000000001])/PPY
#Closer
#-4.19605, 10.7816, -7.46557, -12.4072, -20.0619, -9.6609
p2=np.array([-4.19605, 10.7816, -7.46557])
d2=np.array([-12.4072, -20.0619, -9.6609])/PPY
#Problematic
p2=np.array([-4.33408e+01, 4.66358e+00, -3.74980e+02])
d2=np.array([-9.65888e+00, -7.97706e+00, -4.87376e+01])/PPY
#Star data (Star 0):
p2=np.array([-3.16028220972354973e+02, 1.84156158702263504e+01, -3.58527081068144980e+02])
d2=np.array([13.37458, -17.29674, -15.42706])/PPY
#New candidates
p2=np.array([-20.185400000000001,-2.9136900000000003,16.673200000000001])
d2=np.array([-45.270899999999997,-39.641500000000001,8.8886599999999998])/PPY
#High velocity star
p2=np.array([-22.1313, 5.2090100000000001, 14.912699999999999])
d2=np.array([-16.180199999999999, -22.9924, -5.5882699999999996])/PPY
UT=1e7
data.iloc[30]["hip"]
GAIA[GAIA["hip"]==40170.0]
#Test if lines are skewed
a=p1+d1;b=p1+2*d1
c=p2+d2;d=p2+2*d2
VM=np.vstack((a-b,b-c,c-d))
detVM=np.linalg.det(VM)
if verbose:print(detVM)
#Distance
n=np.cross(d1,d2)
d=np.dot(n,(p1-p2))/np.linalg.norm(n)
if verbose:print("Distance between lines:",d)
#Nearest points
n=np.cross(d1,d2)
n1=np.cross(d1,n)
n2=np.cross(d2,n)
c1=p1+np.dot((p2-p1),n2)/np.dot(d1,n2)*d1
c2=p2+np.dot((p1-p2),n1)/np.dot(d2,n1)*d2
if verbose:print("Nearest point in line 1:",c1)
if verbose:print("Nearest point in line 2:",c2)
#Compute time of encounter
print(c2[0],p2[0],d2[0])
t=(c1[0]-p1[0])/d1[0]
if verbose:print("Encounter time:",t)
t=(c2[0]-p2[0])/d2[0]
if verbose:print("Encounter time:",t)
#Time of minimum distance
dp=p1-p2
dv=d1-d2
dvmag=np.linalg.norm(dv)
tsimin=-np.dot(dp,dv)/(dvmag*dvmag)
print(tsimin)
dsimin=np.linalg.norm(dp+dv*tsimin)
print(dsimin)
dp,dv,d1,d2
ts=np.linspace(-10,10,10000)
r1s=np.array([p1+d1*t for t in ts])
r2s=np.array([p2+d2*t for t in ts])
ds=np.array([np.linalg.norm(r1s[i]-r2s[i]) for i in range(len(ts))])
imin=ds.argmin()
print(ts[imin]/1e6,ds[imin])
#Plot
ts=np.linspace(-10.0*UT,10.0*UT,100)
r1s=np.array([p1+d1*t for t in ts])
r2s=np.array([p2+d2*t for t in ts])
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
ax.plot([0],[0],[0],'o',color='yellow',ms=10)
ax.plot(r1s[:,0],r1s[:,1],r1s[:,2],'b-') #BLUE IS BODY
ax.plot(r2s[:,0],r2s[:,1],r2s[:,2],'r-') #RED IS STAR
ax.plot([p1[0]],[p1[1]],[p1[2]],'g^',ms=10)
ax.plot([p2[0]],[p2[1]],[p2[2]],'g^',ms=10)
dt=1*UT
ax.plot([p1[0],p1[0]+dt*d1[0]],[p1[1],p1[1]+dt*d1[1]],[p1[2],p1[2]+dt*d1[2]],'g-',ms=10)
ax.plot([p2[0],p2[0]+dt*d2[0]],[p2[1],p2[1]+dt*d2[1]],[p2[2],p2[2]+dt*d2[2]],'g-',ms=10)
ax.plot([c1[0]],[c1[1]],[c1[2]],'rs',ms=10)
ax.plot([c2[0]],[c2[1]],[c2[2]],'rs',ms=10)
ax.plot([c1[0],c2[0]],[c1[1],c2[1]],[c1[2],c2[2]],'k-')
GAIA=pd.read_csv("../RVGaia/DB/RVGaia.csv")
GAIA
data=pd.read_csv('encounters.data')
data
past=data[data.tmin<0]
if verbose:print("Encounters in the past:",len(past))
past["admin"]=np.abs(past["dmin"])
past
close=past.sort_values(by=['admin'])
veryclose=close[close["admin"]<1]
if verbose:print("Really close encounters:",len(veryclose))
veryclose
verylike=veryclose.sort_values(by=['vrel'])
verylike
verylike.iloc[1].values[1:7].tolist()
GAIA.iloc[int(verylike.iloc[1][0])]
#70136
GAIA.iloc[70136]
UL=1.496e11
UM=2e30
GCONST=6.67e-11
UT=np.sqrt(UL**3/(GCONST*UM))
if verbose:print(UL/UT/1e3)
E=-GCONST*UM/(2*UL)
if verbose:print("Total orbital energy:",E)
UJ=GCONST*(1e-3*UM)/(7e7)
if verbose:print(np.sqrt(2*(E+UJ)))
#Slingshot
#Formulas in: http://www.mathpages.com/home/kmath114/kmath114.htm
RAD=180/np.pi
DEG=1/RAD
U=1
v1=1
qs=np.linspace(0,90,100)*DEG
v2s=(v1+2*U)*np.sqrt(1-4*U*v1*(1-np.cos(qs))/(v1+2*U)**2)
vinfs=np.sqrt(v2s**2-2*U**2)
fig=plt.figure()
ax=fig.gca()
ax.plot(qs,vinfs)
"""
Explanation: Lines intersection
End of explanation
"""
#Constants
AU=1.496e11 #m
MSUN=1.98e30 #kg
GCONST=6.67e-11 #m^3/(kg s^2)
RAD=180/np.pi
DEG=1/RAD
#Units
G=1.0
UL=1*AU
UM=1*MSUN
UT=np.sqrt(G*UL**3/(GCONST*UM))
UV=UL/UT
if verbose:print("Time unit:",UT)
#Star properties
Ms=0.5
mu=G*Ms
if verbose:print("mu:",mu)
#Planet and star properties (canonic units)
ap=2.9
Mp=1e-3
Rp=7e7/AU
if verbose:print("Planetary radius:",Rp)
RH=ap*(Mp/(3*Ms))**(1./3)
if verbose:print("Hill radius:",RH)
#Planetary orbital velocity
vp=np.sqrt(mu/ap)
if verbose:print("Orbital velocity:",vp)
#Escape velocity
vesc=np.sqrt(2*mu/ap)
if verbose:print("Escape velocity:",vesc)
#Example of slingshot
#Initial velocity of particle respect to planet
vinf1=0.3
#Incoming angle
theta=20*DEG
#Velocity w.r.t. star
vx=-vinf1*np.cos(theta)+vp  # assuming 'u' here means the planet's orbital velocity vp
vy=+vinf1*np.sin(theta)
vmag=np.sqrt(vx**2+vy**2)
if verbose:print("Particle velocity w.r.t. star:",[vx,vy])
if verbose:print("Magnitude of velocity:",vmag)
#Semimajor axis using vis-viva
a=mu/vinf1
if verbose:print("Semimajor axis:",a)
#Periastron
q=0.1*RH
#Corresponding eccentricity
e=q/a+1
if verbose:print("Eccentricity:",e)
#Hiperbolic angle
phi=np.arccos(1/e)
if verbose:print("Aperture angle:",phi*RAD)
#Output angle
beta=2*phi-theta
if verbose:print("Output angle:",beta*RAD)
#Output vinf
vinf2x=vinf1*np.cos(beta)
vinf2y=vinf1*np.sin(beta)
if verbose:print("Output vector w.r.t. planet:",[vinf2x,vinf2y])
#Output velocity
vx=vinf2x+vp  # again assuming the planet's orbital velocity vp
vy=vinf2y
if verbose:print("Output velocity w.r.t. star:",[vx,vy])
#Magnitude of output velocity
v2=vx**2+vy**2
if verbose:print("Output velocity:",np.sqrt(v2))
#Infinite velocity for the system
vinf2=v2-vesc**2  # v2 already holds the squared speed
if vinf2<0:
if verbose:print("El cuerpo sigue ligado")
else:
vinf=np.sqrt(vinf2)
if verbose:print("Velocity body after escaping solar sytem: %e km/s"%(vinf*UV/1e3))
"""
Explanation: Semianalytic expulsion velocity distribution
End of explanation
"""
#Routines
rand=np.random.normal
verbose=0
#Basic theory: http://www.mathpages.com/home/kmath114/kmath114.htm
#Star properties
Ms=1.0
mu=G*Ms
if verbose:print("mu:",mu)
#Planet and star properties (canonic units)
ap=1
Mp=1e-3
Rp=7e7/AU
if verbose:print("Planetary radius:",Rp)
RH=ap*(Mp/(3*Ms))**(1./3)
if verbose:print("Hill radius:",RH)
#Planetary orbital velocity
vp=np.sqrt(mu/ap)
if verbose:print("Planetary orbital velocity:",vp)
#Escape velocity
vesc=np.sqrt(2*mu/ap)
if verbose:print("Planetary system velocity:",vesc)
#np.random.seed(1)
Npart=10000
n=0
vinfs=[]
while n<Npart:
if verbose:print("Test particle:",n)
#Basic elements
ab=rand(ap,0.5)
eb=rand(0.5,0.5)
if eb<0 or eb>1:continue
pb=ab*(1-eb**2)
ib=rand(2)
hop=np.sqrt(mu/pb)
if verbose:print("\tOrbital elements (a,e,p,h/p): ",ab,eb,pb,hop)
#Longitude of the ascending node
Ob=0.0
if np.random.rand()>0.5:Ob=180.0
if verbose:print("\tLongitude of the ascending note (O):",Ob)
#Argument of the periastron
if Ob==0:
coswb=(pb-ap)/eb
if np.abs(coswb)>1:continue
wb=np.arccos(coswb)*RAD
wpf=0.0
else:
coswb=(ap-pb)/eb
if np.abs(coswb)>1:continue
wb=np.arccos(coswb)*RAD
wpf=180.0
if verbose:print("\tPeriastron argument (w):",wb)
if verbose:print("\tw+f:",wpf)
#Magnitude of asteroid velocity
v=np.sqrt(2*mu/ap-mu/ab)
if verbose:print("\tMagnitude of asteroid velocity w.r.t. Sun:",v)
#Components of asteroid velocity
#xdot=-hop*eb*np.cos(Ob*DEG)*(np.sin(wpf*DEG)+eb*np.sin(wb*DEG))
xdot=-hop*eb*np.cos(Ob*DEG)*np.sin(wb*DEG)
ydot=+hop*np.cos(Ob*DEG)*np.cos(ib*DEG)*(np.cos(wpf*DEG)+eb*np.cos(wb*DEG))
zdot=+hop*np.sin(ib*DEG)*(np.cos(wpf*DEG)+eb*np.cos(wb*DEG))
#Magnitude
if verbose:print("\tHeliocentric velocity :",xdot,ydot,zdot," (%lf)"%np.sqrt(xdot**2+ydot**2+zdot**2))
#if verbose:print("Magnitude of asteroid velocity w.r.t. Sun:",np.sqrt(xdot**2+ydot**2+zdot**2))
#Relative velocity
xdotrel=xdot-0
ydotrel=ydot-vp
zdotrel=zdot-0
if verbose:print("\tRelative velocity :",xdotrel,ydotrel,zdotrel)
vinf2=xdotrel**2+ydotrel**2+zdotrel**2
vinf=np.sqrt(vinf2)
rhorel=np.sqrt(xdotrel**2+ydotrel**2+zdotrel**2)
if verbose:print("\tRelative velocity:",vinf)
#Incident angle
theta=np.abs(np.arccos((-ydotrel*vp)/(vinf*vp)))*RAD
if verbose:print("\tIncident angle (cos^-1 (vb.vp)/(vp vp)):",theta)
#Impact parameter
q=(0.5*RH-Rp)*np.random.rand()+Rp
#q=2.5*Rp
if verbose:print("\tImpact parameter:",q)
#Semimajor axis and eccentricity
ainf=1/vinf
einf=q/ainf+1
phi=np.arccos(1/einf)*RAD
if verbose:print("\tAsymptotic a,e,phi:",ainf,einf,phi)
#Output angle
beta=2*phi-theta
if verbose:print("\tOutput angle:",beta)
#Output vinf
vinf2y=vinf*np.cos(beta)
vinf2x=vinf*np.sin(beta)
if verbose:print("\tOutput vector w.r.t. planet:",[vinf2x,vinf2y])
#Output velocity
vy=vinf2y+vp
vx=vinf2x
if verbose:print("\tOutput velocity w.r.t. star:",[vx,vy])
#Magnitude of output velocity
vout=np.sqrt(vx**2+vy**2)
if verbose:print("\tOutput velocity:",vout)
#Infinite velocity for the system
vinf2=vout**2-vesc**2
if vinf2<0:
if verbose:print("\tEl cuerpo sigue ligado")
continue
else:
vinf=np.sqrt(vinf2)
vinfs+=[vinf*UV/1e3]
if verbose:print("\tVelocity body after escaping solar sytem: %e km/s"%(vinf*UV/1e3))
if verbose:print
n+=1
vinfs=np.array(vinfs)
fig=plt.figure()
ax=fig.gca()
ax.hist(vinfs,50)
ax.set_xlabel("v (km/s)")
print("Average velocity: %lf+/-%lf"%(vinfs.mean(),vinfs.std()))
data=np.loadtxt("ejected_orbital_parameters.dat")
fig=plt.figure()
ax=fig.gca()
vinfs=data[:,13]
vinfs=vinfs[vinfs<100]
ax.hist(vinfs)
import astropy
import astropy.coordinates as coord
import astropy.units as u
c1 = coord.ICRS(ra=45.1128*u.degree, dec=0.380844*u.degree,
distance=(2.09081*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=-1.57293*np.cos(0.380844*np.pi/180)*u.mas/u.yr,
pm_dec=-11.6616*u.mas/u.yr,
radial_velocity=2.061*u.km/u.s)
c1.transform_to(coord.Galactic)
gc1 = c1.transform_to(coord.Galactocentric)
print(gc1)
GAIA.iloc[0]
coord.ICRS?
data=pd.read_csv('candidates.csv')
data.sort_values(by=['vrel'])[["tmin","dmin"]]
data.iloc[30].values
data=pd.read_csv('encounters.csv')
data
rhosun=0.1*MSUN/(1e3*PARSEC)**3
z=27*PARSEC*1E3
dphidz=4*np.pi*GCONST*rhosun*z
print(dphidz)
"""
Explanation: Monte Carlo escaping velocity distribution
End of explanation
"""
UL=1*PARSEC*1E3
UM=1*MSUN
UT=1*YEAR
UV=UL/UT
print(UM,UL/1e16,UT)
G=GCONST/(UL**3/(UM*UT**2))
if verbose:print("G=",G)
print("dphidz = %e m/s^2"%dphidz)
print("dphidz = %e UL/UT^2"%(dphidz/(UL/UT**2)))
"""
Explanation: Units of gradient of phi: UL^3/(UM UT^2) * UM / UL^3 * UL = UL/UT^2
End of explanation
"""
data=pd.read_csv("cloud-int.csv")
data["t"]
fig=plt.figure()
ax=fig.gca()
t=data["t"]
Rs=data["part0-R"]
phis=data["part0-phi"]
zs=data["part0-Z"]
ax.plot(t,Rs)
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
xs=data["part0-x"].values
ys=data["part0-y"].values
zs=data["part0-z"].values
ax.plot(xs,ys,zs)
xs=data["part5-x"].values
ys=data["part5-y"].values
zs=data["part5-z"].values
ax.plot(xs,ys,zs)
rmax=max(np.abs(xs).max(),np.abs(ys).max())
ax.set_xlim((-rmax,rmax))
ax.set_ylim((-rmax,rmax))
ax.set_zlim((-1e2,1e2))
"""
Explanation: Integration results
End of explanation
"""
#Segment
p1=np.array([0,0,0])
p2=np.array([1,0,0])
#Point
p=np.array([1.5,0.5,0])
#Segment
p1=np.array([-8.19403e+03,-1.17627e+03,2.94740e+01])
p2=np.array([-8.19038e+03,-1.19769e+03,2.93844e+01])
#Point
p=np.array([-8.19157e+03,-1.19456e+03,2.79498e+01])
#Position
dp=np.dot((p-p1),(p2-p1))/np.linalg.norm(p2-p1)**2*(p2-p1)
dm=np.linalg.norm((p-p1)-dp)
#Time
dt=dp[0]/(p2[0]-p1[0])
print("Length of segment:",np.linalg.norm(p2-p1))
print("Position projection point in segment:",dp)
print("Distance from segment:",dm)
print("dt:",dt)
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
ax.plot([p1[0],p2[0]],[p1[1],p2[1]],[p1[2],p2[2]])
ax.plot([p[0]],[p[1]],[p[2]],'ko')
data=pd.read_csv("encounters.csv")
tmins=np.abs(data.tmin[data.tmin<0])/1e6
fig=plt.figure()
ax=fig.gca()
ax.hist(tmins,100)
ax.set_xlim((0,100))
pot=pd.read_csv('potential.csv')
pot.sort_values("dynvrel")
1e-3/3600*180/np.pi
data=pd.read_csv("potential.csv")
data.sort_values(by="dyndmin")[["hip","dyndmin","vrel"]]
a=1*((86400*2e30**2)/(2*np.pi*6.67e-11))**(1./3)/(2e30/1e3)
a*25.5*3.971**(1./3)*1.1**(2./3)*318
print(a*318)
fig=plt.figure()
ax=fig.gca()
ies=np.linspace(0.0,90.0,100)
ax.plot(ies,np.sqrt(1-np.sin(ies*np.pi/180)**2))
(10/1e2)**(1./6)*0.1**(1./6)
"""
Explanation: Distance from point to interval
End of explanation
"""
#Star properties
Ms=1.0
mu=G*Ms
if verbose:print("mu:",mu)
#Planet and star properties (canonic units)
ap=1
Mp=1e-3
Rp=7e7/AU
if verbose:print("Planetary radius:",Rp)
RH=ap*(Mp/(3*Ms))**(1./3)
if verbose:print("Hill radius:",RH)
#Planetary orbital velocity
vp=np.sqrt(mu/ap)
if verbose:print("Planetary orbital velocity:",vp)
#Escape velocity
vesc=np.sqrt(2*mu/ap)
if verbose:print("Planetary system velocity:",vesc)
"""
Explanation: Semianalytic expulsion velocities following Wiegert, 2014
Paper: https://arxiv.org/pdf/1404.2159.pdf
End of explanation
"""
data=np.loadtxt("ejection.data")
aps=np.unique(data[:,0]);na=len(aps)
Mps=np.unique(data[:,1]);nM=len(Mps)
print("Number of data points: N(a) = %d, N(Mp) = %d"%(na,nM))
print("Values of ap:",aps)
print("Values of Mp:",Mps)
vmean_data=data[:,2].reshape(na,nM)
vstd_data=data[:,3].reshape(na,nM)
print(vmean_data.shape)
fig=plt.figure()
ax=fig.gca()
for i,M in enumerate(Mps):
ax.plot(aps,vmean_data[:,i],label='Mp = %.1e'%Mps[i])
ax.legend(loc='best')
ax.set_xlabel('a(UL)')
ax.set_ylabel('v(UL/UT)')
ax.set_xlim((0,5))
fig=plt.figure()
ax=fig.gca()
for i,M in enumerate(Mps):
ax.plot(aps,vstd_data[:,i],label='Mp = %.1e'%Mps[i])
ax.legend(loc='best')
ax.set_xlabel('a(AU)')
ax.set_ylabel('$\Delta v$(UL/UT)')
fig=plt.figure()
ax=fig.gca()
for i,M in enumerate(Mps):
ax.plot(aps,vstd_data[:,i]/vmean_data[:,i],label='Mp = %.1e'%Mps[i])
ax.legend(loc='best')
ax.set_xlabel('a(UL)')
ax.set_ylabel('$\Delta v/v$')
vinfs=np.loadtxt("vinfs.data")
def maxwelliana(x,a):
return np.exp(-(x/a)**2)
fig=plt.figure()
ax=fig.gca()
sol=ax.hist(vinfs,20)
hs=sol[0];vs=sol[1]
vm=(vs[1:]+vs[:-1])/2
ax.plot(vm,hs.sum()*(vm[1]-vm[0])*gaussian.pdf(vm,0.198,0.114),'r-')
ax.plot(vm,15000*vm**2*maxwelliana(vm,0.15),'b-')
ax.set_xlabel("$v_\infty$(UL/UT)")
MPS,APS=np.meshgrid(Mps,aps)
fig=plt.figure()
ax=fig.gca()
c=ax.contourf(MPS,APS,vmean_data,100,cmap='spectral')
cb=fig.colorbar(c)
cb.set_label("$v_\infty$ (UL/UT)")
c=ax.contour(MPS,APS,vmean_data)
ax.set_xlabel("$M_p$ (UM)")
ax.set_ylabel("$a$ (UL)")
ax.set_xscale("log")
"""
Explanation: Ejection Model
End of explanation
"""
|
tensorflow/federated
|
docs/tutorials/random_noise_generation.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2021 The TensorFlow Federated Authors.
End of explanation
"""
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
"""
Explanation: Random noise generation in TFF
This tutorial will discuss the recommended best practices for random noise generation in TFF. Random noise generation is an important component of many privacy protection techniques in federated learning algorithms, e.g., differential privacy.
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/federated/tutorials/random_noise_generation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/federated/blob/v0.27.0/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/federated/blob/v0.27.0/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/federated/docs/tutorials/random_noise_generation.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Before we begin
First, let us make sure the notebook is connected to a backend that has the relevant components compiled.
End of explanation
"""
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
"""
Explanation: Run the following "Hello World"
example to make sure the TFF environment is correctly setup. If it doesn't work,
please refer to the Installation guide for instructions.
End of explanation
"""
# Set to use 10 clients.
tff.backends.native.set_local_python_execution_context(default_num_clients=10)
@tff.tf_computation
def noise_from_seed(seed):
return tf.random.stateless_normal((), seed=seed)
seed_type_at_server = tff.type_at_server(tff.to_type((tf.int64, [2])))
@tff.federated_computation(seed_type_at_server)
def get_random_min_and_max_deterministic(seed):
# Broadcast seed to all clients.
seed_on_clients = tff.federated_broadcast(seed)
# Clients generate noise from the seed deterministically.
noise_on_clients = tff.federated_map(noise_from_seed, seed_on_clients)
# Aggregate and return the min and max of the values generated on clients.
min = tff.aggregators.federated_min(noise_on_clients)
max = tff.aggregators.federated_max(noise_on_clients)
return min, max
seed = tf.constant([1, 1], dtype=tf.int64)
min, max = get_random_min_and_max_deterministic(seed)
assert min == max
print(f'Seed: {seed.numpy()}. All clients sampled value {min:8.3f}.')
seed += 1
min, max = get_random_min_and_max_deterministic(seed)
assert min == max
print(f'Seed: {seed.numpy()}. All clients sampled value {min:8.3f}.')
"""
Explanation: Random noise on clients
The need for noise on clients generally falls into two cases: identical noise and i.i.d. noise.
For identical noise, the recommended pattern is to maintain a seed on the server, broadcast it to clients, and use the tf.random.stateless_*
functions to generate noise.
For i.i.d. noise, use a tf.random.Generator initialized on the client with from_non_deterministic_state, in keeping with TF's recommendation to avoid the tf.random.<distribution> functions.
Client behavior is different from the server's (it doesn't suffer from the pitfalls discussed later) because each client builds its own computation graph and initializes its own default seed.
Identical noise on clients
End of explanation
"""
@tff.tf_computation
def nondeterministic_noise():
gen = tf.random.Generator.from_non_deterministic_state()
return gen.normal(())
@tff.federated_computation
def get_random_min_and_max_nondeterministic():
noise_on_clients = tff.federated_eval(nondeterministic_noise, tff.CLIENTS)
min = tff.aggregators.federated_min(noise_on_clients)
max = tff.aggregators.federated_max(noise_on_clients)
return min, max
min, max = get_random_min_and_max_nondeterministic()
assert min != max
print(f'Values differ across clients. {min:8.3f},{max:8.3f}.')
new_min, new_max = get_random_min_and_max_nondeterministic()
assert new_min != new_max
assert new_min != min and new_max != max
print(f'Values differ across rounds. {new_min:8.3f},{new_max:8.3f}.')
"""
Explanation: Independent noise on clients
End of explanation
"""
def _keras_model():
inputs = tf.keras.Input(shape=(1,))
outputs = tf.keras.layers.Dense(1)(inputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)
@tff.tf_computation
def tff_return_model_init():
model = _keras_model()
# return the initialized single weight value of the dense layer
return tf.reshape(
tff.learning.ModelWeights.from_model(model).trainable[0], [-1])[0]
@tff.federated_computation
def get_random_min_and_max_nondeterministic():
noise_on_clients = tff.federated_eval(tff_return_model_init, tff.CLIENTS)
min = tff.aggregators.federated_min(noise_on_clients)
max = tff.aggregators.federated_max(noise_on_clients)
return min, max
min, max = get_random_min_and_max_nondeterministic()
assert min != max
print(f'Values differ across clients. {min:8.3f},{max:8.3f}.')
new_min, new_max = get_random_min_and_max_nondeterministic()
assert new_min != new_max
assert new_min != min and new_max != max
print(f'Values differ across rounds. {new_min:8.3f},{new_max:8.3f}.')
"""
Explanation: Model initializer on clients
End of explanation
"""
tf.random.set_seed(1)
@tf.function
def return_one_noise(_):
return tf.random.normal([])
n1=return_one_noise(1)
n2=return_one_noise(2)
assert n1 == n2
print(n1.numpy(), n2.numpy())
"""
Explanation: Random noise on the server
Discouraged usage: directly using tf.random.normal
TF1.x-style APIs such as tf.random.normal for random noise generation are strongly discouraged in TF2, according to the random noise generation tutorial in TF. Surprising behavior may happen when these APIs are used together with tf.function and tf.random.set_seed. For example, the following code generates the same value with each call. This surprising behavior is expected for TF, and an explanation can be found in the documentation of tf.random.set_seed.
End of explanation
"""
tf.random.set_seed(1)
@tff.tf_computation
def return_one_noise(_):
return tf.random.normal([])
n1=return_one_noise(1)
n2=return_one_noise(2)
assert n1 != n2
print(n1, n2)
"""
Explanation: In TFF, things are slightly different. If we wrap the noise generation as a tff.tf_computation instead of a tf.function, non-deterministic random noise will be generated. However, if we run this code snippet multiple times, a different set of (n1, n2) will be generated each time. There is no easy way to set a global random seed for TFF.
End of explanation
"""
@tff.tf_computation
def tff_return_one_noise(i):
g=tf.random.Generator.from_seed(i)
@tf.function
def tf_return_one_noise():
return g.normal([])
return tf_return_one_noise()
@tff.federated_computation
def return_two_noise():
return (tff_return_one_noise(1), tff_return_one_noise(2))
n1, n2 = return_two_noise()
assert n1 != n2
print(n1, n2)
"""
Explanation: Moreover, deterministic noise can be generated in TFF without explicitly setting a seed. The function return_two_noise in the following code snippet returns two identical noise values. This is expected behavior because TFF builds the computation graph in advance of execution. However, it means users have to pay attention to the usage of tf.random.normal in TFF.
Usage with care: tf.random.Generator
We can use tf.random.Generator as suggested in the TF tutorial.
End of explanation
"""
@tff.tf_computation
def tff_return_one_noise(i):
g=tf.random.Generator.from_seed(i)
weights = [
tf.ones([2, 2], dtype=tf.float32),
tf.constant([2], dtype=tf.float32)
]
@tf.function
def tf_return_one_noise():
return tf.nest.map_structure(lambda x: g.normal(tf.shape(x)), weights)
return tf_return_one_noise()
@tff.federated_computation
def return_two_noise():
return (tff_return_one_noise(1), tff_return_one_noise(2))
n1, n2 = return_two_noise()
assert n1[1] != n2[1]
print('n1', n1)
print('n2', n2)
"""
Explanation: However, users may have to be careful with its usage
tf.random.Generator uses tf.Variable to maintain the state of the RNG algorithms. In TFF, it is recommended to construct the generator inside a tff.tf_computation, and it is difficult to pass the generator and its state between tff.tf_computation functions.
The previous code snippet also relies on carefully setting seeds in the generators. We may get expected but surprising results (deterministic n1==n2) if we use tf.random.Generator.from_non_deterministic_state() instead.
In general, TFF prefers functional operations and we will showcase the usage of tf.random.stateless_* functions in the following sections.
In TFF for federated learning, we often work with nested structures instead of scalars and the previous code snippet can be naturally extended to nested structures.
End of explanation
"""
def timestamp_seed():
# tf.timestamp returns microseconds as decimal places, thus scaling by 1e6.
return tf.cast(tf.timestamp() * 1e6, tf.int64)
class RandomSeedGenerator():
def initialize(self, seed=None):
if seed is None:
return tf.stack([timestamp_seed(), 0])
else:
return tf.constant(seed, dtype=tf.int64, shape=(2,))  # use the seed passed in by the caller
def next(self, state):
return state + tf.constant([0, 1], tf.int64)
def structure_next(self, state, nest_structure):
"Returns seed in nested structure and the next state seed."
flat_structure = tf.nest.flatten(nest_structure)
flat_seeds = [state + tf.constant([0, i], tf.int64) for
i in range(len(flat_structure))]
nest_seeds = tf.nest.pack_sequence_as(nest_structure, flat_seeds)
return nest_seeds, flat_seeds[-1] + tf.constant([0, 1], tf.int64)
"""
Explanation: Recommended usage: tf.random.stateless_* with a helper
A general recommendation in TFF is to use the functional tf.random.stateless_* functions for random noise generation. These functions take a seed (a Tensor with shape [2] or a tuple of two scalar tensors) as an explicit input argument to generate random noise. We first define a helper class to maintain the seed as pseudo state. The helper RandomSeedGenerator has functional operators in a state-in-state-out fashion. It is reasonable to use a counter as pseudo state for tf.random.stateless_*, because these functions scramble the seed before using it, which makes noise generated from correlated seeds statistically uncorrelated.
End of explanation
"""
@tff.tf_computation
def tff_return_one_noise(seed_state):
g=RandomSeedGenerator()
weights = [
tf.ones([2, 2], dtype=tf.float32),
tf.constant([2], dtype=tf.float32)
]
@tf.function
def tf_return_one_noise():
nest_seeds, updated_state = g.structure_next(seed_state, weights)
nest_noise = tf.nest.map_structure(lambda x,s: tf.random.stateless_normal(
shape=tf.shape(x), seed=s), weights, nest_seeds)
return nest_noise, updated_state
return tf_return_one_noise()
@tff.tf_computation
def tff_init_state():
g=RandomSeedGenerator()
return g.initialize()
@tff.federated_computation
def return_two_noise():
seed_state = tff_init_state()
n1, seed_state = tff_return_one_noise(seed_state)
n2, seed_state = tff_return_one_noise(seed_state)
return (n1, n2)
n1, n2 = return_two_noise()
assert n1[1] != n2[1]
print('n1', n1)
print('n2', n2)
"""
Explanation: Now let us use the helper class and tf.random.stateless_normal to generate (a nested structure of) random noise in TFF. The following code snippet looks a lot like a TFF iterative process; see simple_fedavg as an example of expressing a federated learning algorithm as a TFF iterative process. The pseudo seed state here for random noise generation is a tf.Tensor that can be easily transported in TFF and TF functions.
End of explanation
"""
|
kingb12/languagemodelRNN
|
report_templates/EncDecReportTemplate.ipynb
|
mit
|
report_file = 'reports/encdec_200_512_2.json'
log_file = 'logs/encdec_200_512_logs.json'
import json
import matplotlib.pyplot as plt
with open(report_file) as f:
report = json.loads(f.read())
with open(log_file) as f:
logs = json.loads(f.read())
print 'Encoder: \n\n', report['architecture']['encoder']
print 'Decoder: \n\n', report['architecture']['decoder']
"""
Explanation: Encoder-Decoder Analysis
Model Architecture
End of explanation
"""
print('Train Perplexity: ', report['train_perplexity'])
print('Valid Perplexity: ', report['valid_perplexity'])
print('Test Perplexity: ', report['test_perplexity'])
"""
Explanation: Perplexity on Each Dataset
End of explanation
"""
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][1], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][2], label=str(k) + ' (valid)')
plt.title('Loss v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
"""
Explanation: Loss vs. Epoch
End of explanation
"""
%matplotlib inline
for k in logs.keys():
plt.plot(logs[k][0], logs[k][3], label=str(k) + ' (train)')
plt.plot(logs[k][0], logs[k][4], label=str(k) + ' (valid)')
plt.title('Perplexity v. Epoch')
plt.xlabel('Epoch')
plt.ylabel('Perplexity')
plt.legend()
plt.show()
"""
Explanation: Perplexity vs. Epoch
End of explanation
"""
def print_sample(sample, best_bleu=None):
enc_input = ' '.join([w for w in sample['encoder_input'].split(' ') if w != '<pad>'])
gold = ' '.join([w for w in sample['gold'].split(' ') if w != '<mask>'])
print('Input: '+ enc_input + '\n')
print('Gend: ' + sample['generated'] + '\n')
print('True: ' + gold + '\n')
if best_bleu is not None:
cbm = ' '.join([w for w in best_bleu['best_match'].split(' ') if w != '<mask>'])
print('Closest BLEU Match: ' + cbm + '\n')
print('Closest BLEU Score: ' + str(best_bleu['best_score']) + '\n')
print('\n')
for i, sample in enumerate(report['train_samples']):
print_sample(sample, report['best_bleu_matches_train'][i] if 'best_bleu_matches_train' in report else None)
for i, sample in enumerate(report['valid_samples']):
print_sample(sample, report['best_bleu_matches_valid'][i] if 'best_bleu_matches_valid' in report else None)
for i, sample in enumerate(report['test_samples']):
print_sample(sample, report['best_bleu_matches_test'][i] if 'best_bleu_matches_test' in report else None)
"""
Explanation: Generations
End of explanation
"""
def print_bleu(blue_struct):
print 'Overall Score: ', blue_struct['score'], '\n'
print '1-gram Score: ', blue_struct['components']['1']
print '2-gram Score: ', blue_struct['components']['2']
print '3-gram Score: ', blue_struct['components']['3']
print '4-gram Score: ', blue_struct['components']['4']
# Training Set BLEU Scores
print_bleu(report['train_bleu'])
# Validation Set BLEU Scores
print_bleu(report['valid_bleu'])
# Test Set BLEU Scores
print_bleu(report['test_bleu'])
# All Data BLEU Scores
print_bleu(report['combined_bleu'])
"""
Explanation: BLEU Analysis
End of explanation
"""
# Training Set BLEU n-pairs Scores
print_bleu(report['n_pairs_bleu_train'])
# Validation Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_valid'])
# Test Set n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_test'])
# Combined n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_all'])
# Ground Truth n-pairs BLEU Scores
print_bleu(report['n_pairs_bleu_gold'])
"""
Explanation: N-pairs BLEU Analysis
This analysis randomly samples 1000 pairs of generations/ground truths and treats them as translations, giving their BLEU score. We expect very low scores for the ground truth, while high scores can expose hyper-common generations.
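The scores below are precomputed in the report, but a rough sketch of the sampling procedure could look like this (assuming whitespace-tokenized strings and using NLTK's sentence-level BLEU, which may differ from the scorer that produced the report):
```
import random
from nltk.translate.bleu_score import sentence_bleu

def n_pairs_bleu(generated, references, n_pairs=1000):
    # Pair random generations with random ground truths and score them as if they were translations.
    scores = []
    for _ in range(n_pairs):
        i, j = random.randrange(len(generated)), random.randrange(len(references))
        scores.append(sentence_bleu([references[j].split()], generated[i].split()))
    return sum(scores) / len(scores)
```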
End of explanation
"""
print 'Average (Train) Generated Score: ', report['average_alignment_train']
print 'Average (Valid) Generated Score: ', report['average_alignment_valid']
print 'Average (Test) Generated Score: ', report['average_alignment_test']
print 'Average (All) Generated Score: ', report['average_alignment_all']
print 'Average Gold Score: ', report['average_alignment_gold']
"""
Explanation: Alignment Analysis
This analysis computes the average Smith-Waterman alignment score for generations, with the same intuition as N-pairs BLEU: we expect low scores for the ground truth, while hyper-common generations raise the scores.
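For reference, a minimal Smith-Waterman local alignment on token sequences looks roughly like the sketch below; the match/mismatch/gap scores here are arbitrary and almost certainly differ from those used to build the report.
```
def smith_waterman(a, b, match=2, mismatch=-1, gap=-1):
    # Best local alignment score between token lists a and b (score matrix H, no traceback).
    H = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best = 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            diag = H[i - 1][j - 1] + (match if a[i - 1] == b[j - 1] else mismatch)
            H[i][j] = max(0, diag, H[i - 1][j] + gap, H[i][j - 1] + gap)
            best = max(best, H[i][j])
    return best

# e.g. smith_waterman(sample['generated'].split(), sample['gold'].split())
```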
End of explanation
"""
|
robertoalotufo/ia898
|
master/dftexamples.ipynb
|
mit
|
import sys,os
%matplotlib inline
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from numpy.fft import fft2
"""
Explanation: Demo dftexamples
Importing scripts
End of explanation
"""
f = np.ones((100, 200))
print("Constant image:\n",f)
F = fft2(f)
print("\nDFT of a constant image:\n",F.round(4))
plt.title('Constant image')
plt.imshow(f,cmap='gray')
plt.colorbar()
plt.title('DFT of a constant image')
plt.imshow(ia.dftview(F),cmap='gray')
plt.colorbar()
"""
Explanation: Constant image
The DFT of a constant image is a single point at F(0,0), which gives the sum of all pixels in the image.
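Once the cell below has been run, this can be verified directly (a quick sanity check, not part of the original demo):
```
np.isclose(F[0, 0].real, f.sum())         # True: F(0, 0) carries the sum of all pixels
np.allclose(np.delete(F.ravel(), 0), 0)   # True: every other coefficient is numerically zero
```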
End of explanation
"""
f = np.zeros((128, 128))
f[63:63+5,63:63+5] = 1
F = fft2(f)
plt.title('Square image')
plt.imshow(f,cmap='gray')
plt.colorbar()
plt.title('DFT of a square image')
plt.imshow(ia.dftview(F),cmap='gray')
plt.colorbar()
"""
Explanation: Square image
The DFT of a square image is a digital sinc.
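For reference, the 1-D version of this result is the Dirichlet kernel (the "digital sinc"): for a rectangle of $w$ ones in an $N$-point DFT, the magnitude spectrum is

$$|F(k)| = \left|\frac{\sin(\pi w k / N)}{\sin(\pi k / N)}\right|,$$

and since the square is separable, its 2-D spectrum is the product of two such kernels, one per axis (shifting the square only changes the phase, not the magnitude).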
End of explanation
"""
f = np.zeros((128, 128))
k = np.array([[1,2,3,4,5,6,5,4,3,2,1]])
k2 = np.dot(k.T, k)
f[63:63+k2.shape[0], 63:63+k2.shape[1]] = k2
F = fft2(f)
plt.title('Pyramid image')
plt.imshow(f,cmap='gray')
plt.colorbar()
plt.title('DFT of a pyramid image')
plt.imshow(ia.dftview(F),cmap='gray')
plt.colorbar()
"""
Explanation: Pyramid image
The DFT of a pyramid is the square of the digital sinc.
End of explanation
"""
import numpy as np
def gaussian(s, mu, cov):
d = len(s) # dimension
n = np.prod(s) # n. of samples (pixels)
x = np.indices(s).reshape( (d, n))
xc = x - mu
k = 1. * xc * np.dot(np.linalg.inv(cov), xc)
k = np.sum(k,axis=0) #the sum is only applied to the rows
g = (1./((2 * np.pi)**(d/2.) * np.sqrt(np.linalg.det(cov)))) * np.exp(-1./2 * k)
return g.reshape(s)
f = gaussian((128,128),np.transpose([[65,65]]),[[3*3,0],[0,5*5]])
plt.title('Gaussian image')
plt.imshow(ia.normalize(f),cmap='gray')
plt.colorbar()
F = fft2(f)
plt.title('DFT of a gaussian image')
plt.imshow(ia.dftview(F),cmap='gray')
plt.colorbar()
"""
Explanation: Gaussian image
The DFT of a Gaussian image is a Gaussian image.
Function to create a gaussian
End of explanation
"""
f = ia.comb((128,128),(4,4),(0,0))
plt.title('Impulse image')
plt.imshow(ia.normalize(f),cmap='gray')
plt.colorbar()
F = fft2(f)
plt.title('DFT of an impulse image')
plt.imshow(ia.dftview(F),cmap='gray')
plt.colorbar()
"""
Explanation: Impulse image
The DFT of an impulse train (comb) image is another impulse train.
End of explanation
"""
|
gururajl/deep-learning
|
transfer-learning/Transfer_Learning.ipynb
|
mit
|
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
vgg_dir = 'tensorflow_vgg/'
# Make sure vgg exists
if not isdir(vgg_dir):
raise Exception("VGG directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(vgg_dir + "vgg16.npy"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar:
urlretrieve(
'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy',
vgg_dir + 'vgg16.npy',
pbar.hook)
else:
print("Parameter file already exists!")
"""
Explanation: Transfer Learning
Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using VGGNet trained on the ImageNet dataset as a feature extractor. Below is a diagram of the VGGNet architecture.
<img src="assets/cnnarchitecture.jpg" width=700px>
VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes.
You can read more about transfer learning from the CS231n course notes.
Pretrained VGGNet
We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. This code is already included in the 'tensorflow_vgg' directory, so you don't have to clone it.
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. Download the parameter file using the next cell.
End of explanation
"""
import tarfile
dataset_folder_path = 'flower_photos'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('flower_photos.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar:
urlretrieve(
'http://download.tensorflow.org/example_images/flower_photos.tgz',
'flower_photos.tar.gz',
pbar.hook)
if not isdir(dataset_folder_path):
with tarfile.open('flower_photos.tar.gz') as tar:
tar.extractall()
tar.close()
"""
Explanation: Flower power
Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial.
End of explanation
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow_vgg import vgg16
from tensorflow_vgg import utils
data_dir = 'flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]
"""
Explanation: ConvNet Codes
Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier.
Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $224 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. The network implemented here has this structure (copied from the source code):
```
self.conv1_1 = self.conv_layer(bgr, "conv1_1")
self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2")
self.pool1 = self.max_pool(self.conv1_2, 'pool1')
self.conv2_1 = self.conv_layer(self.pool1, "conv2_1")
self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2")
self.pool2 = self.max_pool(self.conv2_2, 'pool2')
self.conv3_1 = self.conv_layer(self.pool2, "conv3_1")
self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2")
self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3")
self.pool3 = self.max_pool(self.conv3_3, 'pool3')
self.conv4_1 = self.conv_layer(self.pool3, "conv4_1")
self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2")
self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3")
self.pool4 = self.max_pool(self.conv4_3, 'pool4')
self.conv5_1 = self.conv_layer(self.pool4, "conv5_1")
self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2")
self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3")
self.pool5 = self.max_pool(self.conv5_3, 'pool5')
self.fc6 = self.fc_layer(self.pool5, "fc6")
self.relu6 = tf.nn.relu(self.fc6)
```
So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use
with tf.Session() as sess:
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
This creates the vgg object, then builds the graph with vgg.build(input_). Then to get the values from the layer,
feed_dict = {input_: images}
codes = sess.run(vgg.relu6, feed_dict=feed_dict)
End of explanation
"""
# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
codes_list = []
labels = []
batch = []
codes = None
with tf.Session() as sess:
# TODO: Build the vgg network here
vgg = vgg16.Vgg16()
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
with tf.name_scope("content_vgg"):
vgg.build(input_)
for each in classes:
print("Starting {} images".format(each))
class_path = data_dir + each
files = os.listdir(class_path)
for ii, file in enumerate(files, 1):
# Add images to the current batch
# utils.load_image crops the input images for us, from the center
img = utils.load_image(os.path.join(class_path, file))
batch.append(img.reshape((1, 224, 224, 3)))
labels.append(each)
# Running the batch through the network to get the codes
if ii % batch_size == 0 or ii == len(files):
# Image batch to pass to VGG network
images = np.concatenate(batch)
# TODO: Get the values from the relu6 layer of the VGG network
codes_batch = sess.run(vgg.relu6, feed_dict={input_ : images})
# Here I'm building an array of the codes
if codes is None:
codes = codes_batch
else:
codes = np.concatenate((codes, codes_batch))
# Reset to start building the next batch
batch = []
print('{} images processed'.format(ii))
# write codes to file
with open('codes', 'w') as f:
codes.tofile(f)
# write labels to file
import csv
with open('labels', 'w') as f:
writer = csv.writer(f, delimiter='\n')
writer.writerow(labels)
"""
Explanation: Below I'm running images through the VGG network in batches.
Exercise: Below, build the VGG network. Also get the codes from the first fully connected layer (make sure you get the ReLUd values).
End of explanation
"""
# read codes and labels from file
import csv
with open('labels') as f:
reader = csv.reader(f, delimiter='\n')
labels = np.array([each for each in reader if len(each) > 0]).squeeze()
with open('codes') as f:
codes = np.fromfile(f, dtype=np.float32)
codes = codes.reshape((len(labels), -1))
codes[0:3]
"""
Explanation: Building the Classifier
Now that we have codes for all the images, we can build a simple classifier on top of them. The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work.
End of explanation
"""
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
lb.fit(labels)
labels_vecs = lb.transform(labels)# Your one-hot encoded labels array here
"""
Explanation: Data prep
As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels!
Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels.
End of explanation
"""
from sklearn.model_selection import StratifiedShuffleSplit
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
train_idx, val_idx = next(ss.split(codes, labels))
half_val_len = int(len(val_idx)/2)
val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:]
train_x, train_y = codes[train_idx], labels_vecs[train_idx]
val_x, val_y = codes[val_idx], labels_vecs[val_idx]
test_x, test_y = codes[test_idx], labels_vecs[test_idx]
print("Train shapes (x, y):", train_x.shape, train_y.shape)
print("Validation shapes (x, y):", val_x.shape, val_y.shape)
print("Test shapes (x, y):", test_x.shape, test_y.shape)
"""
Explanation: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same distribution of classes as the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn.
You can create the splitter like so:
ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
Then split the data with
splitter = ss.split(x, y)
ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide.
Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets.
End of explanation
"""
inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]])
# TODO: Classifier layers and operations
fc1 = tf.layers.dense(inputs=inputs_, units=128, activation=tf.nn.relu)
logits = tf.layers.dense(inputs=fc1, units=5, activation=None)  # output layer logits
entropy = tf.losses.softmax_cross_entropy(onehot_labels=labels_, logits=logits)# cross entropy loss
cost = tf.reduce_mean(entropy)
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost) # training optimizer
# Operations for validation/test accuracy
predicted = tf.nn.softmax(logits)
correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
"""
Explanation: If you did it right, you should see these sizes for the training sets:
Train shapes (x, y): (2936, 4096) (2936, 5)
Validation shapes (x, y): (367, 4096) (367, 5)
Test shapes (x, y): (367, 4096) (367, 5)
Classifier layers
Once you have the convolutional codes, you just need to build a classifier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network.
Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs; each of them is a 4096-dimensional vector. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost.
End of explanation
"""
def get_batches(x, y, n_batches=10):
""" Return a generator that yields batches from arrays x and y. """
batch_size = len(x)//n_batches
for ii in range(0, n_batches*batch_size, batch_size):
# If we're not on the last batch, grab data with size batch_size
if ii != (n_batches-1)*batch_size:
X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size]
# On the last batch, grab the rest of the data
else:
X, Y = x[ii:], y[ii:]
# I love generators
yield X, Y
"""
Explanation: Batches!
Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data.
End of explanation
"""
saver = tf.train.Saver()
with tf.Session() as sess:
epochs = 10
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in get_batches(train_x, train_y):
batch_cost, _, batch_acc = sess.run([cost, optimizer, accuracy], feed_dict={inputs_: x,
labels_: y})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost),
"Accuracy: {:.4f}".format(batch_acc))
# TODO: Your training code here
saver.save(sess, "checkpoints/flowers.ckpt")
"""
Explanation: Training
Here, we'll train the network.
Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. Use the get_batches function I wrote before to get your batches like for x, y in get_batches(train_x, train_y). Or write your own!
End of explanation
"""
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: test_x,
labels_: test_y}
test_acc = sess.run(accuracy, feed_dict=feed)
print("Test accuracy: {:.4f}".format(test_acc))
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.ndimage import imread
"""
Explanation: Testing
Below you see the test accuracy. You can also see the predictions returned for images.
End of explanation
"""
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)
# Run this cell if you don't have a vgg graph built
if 'vgg' in globals():
print('"vgg" object already exists. Will not create again.')
else:
#create vgg
with tf.Session() as sess:
input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
vgg = vgg16.Vgg16()
vgg.build(input_)
with tf.Session() as sess:
img = utils.load_image(test_img_path)
img = img.reshape((1, 224, 224, 3))
feed_dict = {input_: img}
code = sess.run(vgg.relu6, feed_dict=feed_dict)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
feed = {inputs_: code}
prediction = sess.run(predicted, feed_dict=feed).squeeze()
plt.imshow(test_img)
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
"""
Explanation: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
End of explanation
"""
|
fluffy-hamster/A-Beginners-Guide-to-Python
|
A Beginners Guide to Python/Homework Solutions/24. Getting Help (HW).ipynb
|
mit
|
x = [ [2] * 3 ] * 3
x[0][0] = "ZZ"
print(*x, sep="\n")
"""
Explanation: Where to Get Help: Homework Assignment
You need to think a little bit about your search; the better it is, the more likely you are to find what you want. Let me give you a real example I struggled with:
End of explanation
"""
out=[[0]*3]*3
print( id(out[0]) )
print( id(out[1]) ) # want to know what "id" is? Why not read the documentation!
"""
Explanation: What I wanted to do was build a nested list; x is supposed to look like:
[[2, 2, 2],
[2, 2, 2],
[2, 2, 2]]
And then I wanted to change the value at index [0][0], but notice that instead of a single value changing, the first item in every list changes. I wanted:
[["ZZ", 2, 2],
[ 2, 2, 2],
[ 2, 2, 2]]
But I got:
[["ZZ", 2, 2],
["ZZ", 2, 2],
["ZZ", 2, 2]]
Weird, right?
Your homework for this week is to search Google for an answer to this problem. Why doesn't X behave like I want it to, and what do I need to make it work?
I know for a fact the answer is on stackoverflow already (and probably hundreds of other websites too), so this is a test of your googling skills:
What search query is likely to return the information you need?
This exercise is really useful, I think. Throughout your programming career you are going to get stumped on tough questions. In many cases, the fastest way to solve your issue is going to be a Google search. BUT to get the most out of a search engine you will need to think carefully about your problem and what query might contain the answer.
Possible Solution
Google to the rescue! Okay, so let's think about this problem a little bit; what "buzz words" might we need to feed Google in order to get the answer we want?
Let's try...
Python
Err...yeah, I think we are going to need to be a bit more specific than that.
Python nested list
This search is a bit better, I mean, from the context alone Google has probably figured out that we are not interested in snakes! But again, still probably not specific enough.
Python nested list sublist assignment
This query seems decent, right? It seems pretty descriptive of the problem, after all.
Let's run it!
Google Search (15th June 2017)
The third hit sounds promising, so let's go there and see.
...And sure enough, it sounds like someone is dealing with the EXACT ISSUE we had. The top-voted reply not only explains the issue but also the fix.
Basically, the issue is that when you write [[0]*3]*3 we are actually storing three references to the same list.
End of explanation
"""
a = [2] * 3
x = [a] * 3
print(*x, sep="\n")
print()
a[0] = "ZZ"
print(*x, sep="\n")
"""
Explanation: To see what's happening, let's rewrite the code to make the issue even clearer:
End of explanation
"""
x = []
for i in range(3):
x.append([2]*3)
print(*x, sep="\n")
print()
x[0][0] = "ZZ"
print(*x, sep="\n")
"""
Explanation: So basically the issue was that when we write [2]*3 and try to add it to a list, Python isn't making three separate lists; rather, it's adding the same list three times!
The fix, then: we need to make sure Python knows we want three separate lists, which we can do with a for loop:
End of explanation
"""
|
jdhp-docs/python_notebooks
|
nb_dev_jupyter/notebook_snippets_en.ipynb
|
mit
|
%matplotlib notebook
# As an alternative, one may use: %pylab notebook
# For old Matplotlib and Ipython versions, use the non-interactive version:
# %matplotlib inline or %pylab inline
# To ignore warnings (http://stackoverflow.com/questions/9031783/hide-all-warnings-in-ipython)
import warnings
warnings.filterwarnings('ignore')
import math
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets
from ipywidgets import interact
"""
Explanation: Notebook snippets, tips and tricks
TODO:
* Read https://www.dataquest.io/blog/jupyter-notebook-tips-tricks-shortcuts/
* Read http://blog.juliusschulz.de/blog/ultimate-ipython-notebook
* how to avoid losing matplotlib interactive rendering when a document is converted to HTML?
* https://www.reddit.com/r/IPython/comments/36p360/try_matplotlib_notebook_for_interactive_plots/
* http://stackoverflow.com/questions/36151181/exporting-interactive-jupyter-notebook-to-html
* https://jakevdp.github.io/blog/2013/12/05/static-interactive-widgets/
* table of contents (JS)
* matplotlib / D3.js interaction
* matplotlib animations: how to make it faster
* inspiration
* http://louistiao.me/posts/notebooks/embedding-matplotlib-animations-in-jupyter-notebooks/
* https://github.com/ltiao/notebooks
* https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/
* How to make (personalized) Reveal.js slides from this notebook: https://forum.poppy-project.org/t/utiliser-jupyter-pour-des-presentations-etape-par-etape-use-jupyter-to-present-step-by-step/2271/2
* See https://blog.dominodatalab.com/lesser-known-ways-of-using-notebooks/
Extension wishlist and todo:
- Table of content
- Hide some blocks in the HTML export
- See https://github.com/jupyter/notebook/issues/534
- Customize CSS in HTML export
- Add disqus in HTML export
- See: https://github.com/jupyter/nbviewer/issues/80
- Example: http://nbviewer.jupyter.org/gist/tonyfast/977184c1243287e7e55e
- Add metadata header/footer (initial publication date, last revision date, author, email, website, license, ...)
- Vim like editor/navigation shortcut keys (search, search+edit, ...)
- Spell checking
- See https://github.com/ipython/ipython/issues/3216#issuecomment-59507673 and http://www.simulkade.com/posts/2015-04-07-spell-checking-in-jupyter-notebooks.html
Inspiration:
- https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks
Import directives
End of explanation
"""
x = np.arange(-2 * np.pi, 2 * np.pi, 0.1)
y = np.sin(x)
plt.plot(x, y)
"""
Explanation: Useful keyboard shortcuts
Enter edit mode: Enter
Enter command mode: Escape
In command mode:
Show keyboard shortcuts: h
Find and replace: f
Insert a cell above the selection: a
Insert a cell below the selection: b
Switch to Markdown: m
Delete the selected cells: dd (type twice 'd' quickly)
Undo cell deletion: z
Execute the selected cell: Ctrl + Enter
Execute the selected cell and select the next cell: Shift + Enter
Execute the selected cell and insert below: Alt + Enter
Toggle output: o
Toggle line number: l
Copy selected cells: c
Paste copied cells below: v
Select the previous cell: k
Select the next cell: j
Merge selected cells, or current cell with cell below if only one cell selected: Shift + m
In edit mode:
Code completion or indent: Tab
Tooltip: Shift + Tab
Type "Shift + Tab" twice to see the online documentation of the selected element
Type "Shift + Tab" 4 times to the online documentation in a dedicated frame
Indent: ⌘] (on MacOS)
Dedent: ⌘[ (on MacOS)
Execute the selected cell: Ctrl + Enter
Execute the selected cell and select the next cell: Shift + Enter
Execute the selected cell and insert below: Alt + Enter
Cut a cell at the current cursor position: Ctrl + Shift + -
Matplotlib
To plot a figure within a notebook, insert the
%matplotlib notebook (or %pylab notebook)
directive at the beginning of the document.
As an alternative, one may use
%matplotlib inline (or %pylab inline)
for non-interactive plots on old Matplotlib/Ipython versions.
2D plots
End of explanation
"""
from mpl_toolkits.mplot3d import axes3d
# Build data ###############
x = np.arange(-5, 5, 0.25)
y = np.arange(-5, 5, 0.25)
xx,yy = np.meshgrid(x, y)
z = np.sin(np.sqrt(xx**2 + yy**2))
# Plot data #################
fig = plt.figure()
ax = axes3d.Axes3D(fig)
ax.plot_wireframe(xx, yy, z)
plt.show()
"""
Explanation: 3D plots
End of explanation
"""
from matplotlib.animation import FuncAnimation
# Plots
fig, ax = plt.subplots()
def update(frame):
x = np.arange(frame/10., frame/10. + 2. * math.pi, 0.1)
ax.clear()
ax.plot(x, np.cos(x))
# Optional: save plots
filename = "img_{:03}.png".format(frame)
plt.savefig(filename)
# Note: "interval" is in ms
anim = FuncAnimation(fig, update, interval=100)
plt.show()
"""
Explanation: Animations
End of explanation
"""
%%html
<div id="toc"></div>
%%javascript
var toc = document.getElementById("toc");
toc.innerHTML = "<b>Table of contents:</b>";
toc.innerHTML += "<ol>"
var h_list = $("h2, h3"); //$("h2"); // document.getElementsByTagName("h2");
for(var i = 0 ; i < h_list.length ; i++) {
var h = h_list[i];
var h_str = h.textContent.slice(0, -1); // "slice(0, -1)" remove the last character
if(h_str.length > 0) {
if(h.tagName == "H2") { // https://stackoverflow.com/questions/10539419/javascript-get-elements-tag
toc.innerHTML += "<li><a href=\"#" + h_str.replace(/\s+/g, '-') + "\">" + h_str + "</a></li>";
} else if(h.tagName == "H3") { // https://stackoverflow.com/questions/10539419/javascript-get-elements-tag
toc.innerHTML += "<li> <a href=\"#" + h_str.replace(/\s+/g, '-') + "\">" + h_str + "</a></li>";
}
}
}
toc.innerHTML += "</ol>"
"""
Explanation: Interactive plots with Plotly
TODO: https://plot.ly/ipython-notebooks/
Interactive plots with Bokeh
TODO: http://bokeh.pydata.org/en/latest/docs/user_guide/notebook.html
Embedded HTML and Javascript
End of explanation
"""
%run ./notebook_snippets_run_test.py
%run ./notebook_snippets_run_mpl_test.py
"""
Explanation: IPython built-in magic commands
See http://ipython.readthedocs.io/en/stable/interactive/magics.html
Execute an external python script
End of explanation
"""
# %load ./notebook_snippets_run_mpl_test.py
#!/usr/bin/env python3
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module has been written to illustrate the ``%run`` magic command in
``notebook_snippets.ipynb``.
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
x = np.arange(-10, 10, 0.1)
y = np.cos(x)
plt.plot(x, y)
plt.grid(True)
plt.show()
if __name__ == '__main__':
main()
"""
Explanation: Load an external python script
Load the full script
End of explanation
"""
# %load -s main ./notebook_snippets_run_mpl_test.py
def main():
x = np.arange(-10, 10, 0.1)
y = np.cos(x)
plt.plot(x, y)
plt.grid(True)
plt.show()
"""
Explanation: Load a specific symbol (function, class, ...)
End of explanation
"""
# %load -r 22-41 ./notebook_snippets_run_mpl_test.py
"""
This module has been written to illustrate the ``%run`` magic command in
``notebook_snippets.ipynb``.
"""
import numpy as np
import matplotlib.pyplot as plt
def main():
x = np.arange(-10, 10, 0.1)
y = np.cos(x)
plt.plot(x, y)
plt.grid(True)
plt.show()
if __name__ == '__main__':
main()
"""
Explanation: Load specific lines
End of explanation
"""
%%time
plt.hist(np.random.normal(loc=0.0, scale=1.0, size=100000), bins=50)
"""
Explanation: Time measurement
%time
End of explanation
"""
%%timeit
plt.hist(np.random.normal(loc=0.0, scale=1.0, size=100000), bins=50)
"""
Explanation: %timeit
End of explanation
"""
#help(ipywidgets)
#dir(ipywidgets)
from ipywidgets import IntSlider
from IPython.display import display
slider = IntSlider(min=1, max=10)
display(slider)
"""
Explanation: ipywidget
On JupyterLab, you should install the widgets extension first (see https://ipywidgets.readthedocs.io/en/latest/user_install.html#installing-the-jupyterlab-extension):
jupyter labextension install @jupyter-widgets/jupyterlab-manager
End of explanation
"""
#help(ipywidgets.interact)
"""
Explanation: ipywidgets.interact
Documentation
See http://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
End of explanation
"""
@interact(text="IPython Widgets")
def greeting(text):
print("Hello {}".format(text))
"""
Explanation: Using interact as a decorator with named parameters
To me, this is the best option for single usage functions...
Text
End of explanation
"""
@interact(num=5)
def square(num):
print("{} squared is {}".format(num, num*num))
@interact(num=(0, 100))
def square(num):
print("{} squared is {}".format(num, num*num))
@interact(num=(0, 100, 10))
def square(num):
print("{} squared is {}".format(num, num*num))
"""
Explanation: Integer (IntSlider)
End of explanation
"""
@interact(num=5.)
def square(num):
print("{} squared is {}".format(num, num*num))
@interact(num=(0., 10.))
def square(num):
print("{} squared is {}".format(num, num*num))
@interact(num=(0., 10., 0.5))
def square(num):
print("{} squared is {}".format(num, num*num))
"""
Explanation: Float (FloatSlider)
End of explanation
"""
@interact(upper=False)
def greeting(upper):
text = "hello"
if upper:
print(text.upper())
else:
print(text.lower())
"""
Explanation: Boolean (Checkbox)
End of explanation
"""
@interact(name=["John", "Bob", "Alice"])
def greeting(name):
print("Hello {}".format(name))
"""
Explanation: List (Dropdown)
End of explanation
"""
@interact(word={"One": "Un", "Two": "Deux", "Three": "Trois"})
def translate(word):
print(word)
x = np.arange(-2 * np.pi, 2 * np.pi, 0.1)
@interact(function={"Sin": np.sin, "Cos": np.cos})
def plot(function):
y = function(x)
plt.plot(x, y)
"""
Explanation: Dictionary (Dropdown)
End of explanation
"""
@interact
def greeting(text="World"):
print("Hello {}".format(text))
"""
Explanation: Using interact as a decorator
Text
End of explanation
"""
@interact
def square(num=2):
print("{} squared is {}".format(num, num*num))
@interact
def square(num=(0, 100)):
print("{} squared is {}".format(num, num*num))
@interact
def square(num=(0, 100, 10)):
print("{} squared is {}".format(num, num*num))
"""
Explanation: Integer (IntSlider)
End of explanation
"""
@interact
def square(num=5.):
print("{} squared is {}".format(num, num*num))
@interact
def square(num=(0., 10.)):
print("{} squared is {}".format(num, num*num))
@interact
def square(num=(0., 10., 0.5)):
print("{} squared is {}".format(num, num*num))
"""
Explanation: Float (FloatSlider)
End of explanation
"""
@interact
def greeting(upper=False):
text = "hello"
if upper:
print(text.upper())
else:
print(text.lower())
"""
Explanation: Boolean (Checkbox)
End of explanation
"""
@interact
def greeting(name=["John", "Bob", "Alice"]):
print("Hello {}".format(name))
"""
Explanation: List (Dropdown)
End of explanation
"""
@interact
def translate(word={"One": "Un", "Two": "Deux", "Three": "Trois"}):
print(word)
x = np.arange(-2 * np.pi, 2 * np.pi, 0.1)
@interact
def plot(function={"Sin": np.sin, "Cos": np.cos}):
y = function(x)
plt.plot(x, y)
"""
Explanation: Dictionary (Dropdown)
End of explanation
"""
def greeting(text):
print("Hello {}".format(text))
interact(greeting, text="IPython Widgets")
"""
Explanation: Using interact as a function
To me, this is the best option for multiple usage functions...
Text
End of explanation
"""
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=5)
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=(0, 100))
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=(0, 100, 10))
"""
Explanation: Integer (IntSlider)
End of explanation
"""
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=5.)
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=(0., 10.))
def square(num):
print("{} squared is {}".format(num, num*num))
interact(square, num=(0., 10., 0.5))
"""
Explanation: Float (FloatSlider)
End of explanation
"""
def greeting(upper):
text = "hello"
if upper:
print(text.upper())
else:
print(text.lower())
interact(greeting, upper=False)
"""
Explanation: Boolean (Checkbox)
End of explanation
"""
def greeting(name):
print("Hello {}".format(name))
interact(greeting, name=["John", "Bob", "Alice"])
"""
Explanation: List (Dropdown)
End of explanation
"""
def translate(word):
print(word)
interact(translate, word={"One": "Un", "Two": "Deux", "Three": "Trois"})
x = np.arange(-2 * np.pi, 2 * np.pi, 0.1)
def plot(function):
y = function(x)
plt.plot(x, y)
interact(plot, function={"Sin": np.sin, "Cos": np.cos})
"""
Explanation: Dictionary (Dropdown)
End of explanation
"""
@interact(upper=False, name=["john", "bob", "alice"])
def greeting(upper, name):
text = "hello {}".format(name)
if upper:
print(text.upper())
else:
print(text.lower())
"""
Explanation: Example of using multiple widgets on one function
End of explanation
"""
from IPython.display import Image
Image("fourier.gif")
"""
Explanation: Display images (PNG, JPEG, GIF, ...)
Within a code cell (using IPython.display)
End of explanation
"""
from IPython.display import Audio
"""
Explanation: Within a Markdown cell
Sound player widget
See: https://ipython.org/ipython-doc/dev/api/generated/IPython.display.html#IPython.display.Audio
End of explanation
"""
framerate = 44100
t = np.linspace(0, 5, framerate*5)
data = np.sin(2*np.pi*220*t) + np.sin(2*np.pi*224*t)
Audio(data, rate=framerate)
"""
Explanation: Generate a sound
End of explanation
"""
data_left = np.sin(2 * np.pi * 220 * t)
data_right = np.sin(2 * np.pi * 224 * t)
Audio([data_left, data_right], rate=framerate)
"""
Explanation: Generate a multi-channel (stereo or more) sound
End of explanation
"""
Audio("http://www.nch.com.au/acm/8k16bitpcm.wav")
Audio(url="http://www.w3schools.com/html/horse.ogg")
"""
Explanation: From URL
End of explanation
"""
#Audio('/path/to/sound.wav')
#Audio(filename='/path/to/sound.ogg')
"""
Explanation: From file
End of explanation
"""
#Audio(b'RAW_WAV_DATA..)
#Audio(data=b'RAW_WAV_DATA..)
"""
Explanation: From bytes
End of explanation
"""
from IPython.display import YouTubeVideo
vid = YouTubeVideo("0HlRtU8clt4")
display(vid)
"""
Explanation: Youtube widget
Class for embedding a YouTube Video in an IPython session, based on its video id.
e.g. to embed the video from https://www.youtube.com/watch?v=0HlRtU8clt4 , you would do:
See https://ipython.org/ipython-doc/dev/api/generated/IPython.display.html#IPython.display.YouTubeVideo
End of explanation
"""
|
bzamecnik/ml
|
snippets/keras/sine_phases_autoencoder.ipynb
|
mit
|
%pylab inline
import keras
import numpy as np
import keras
N = 50
# phase_step = 1 / (2 * np.pi)
t = np.arange(50)
phases = np.linspace(0, 1, N) * 2 * np.pi
x = np.array([np.sin(2 * np.pi / N * t + phi) for phi in phases])
print(x.shape)
imshow(x);
plot(x[0]);
plot(x[1]);
plot(x[2]);
from keras.models import Sequential
from keras.layers import containers
from keras.layers.core import Dense, AutoEncoder
encoder = containers.Sequential([
Dense(25, input_dim=50),
Dense(12)
])
decoder = containers.Sequential([
Dense(25, input_dim=12),
Dense(50)
])
model = Sequential()
model.add(AutoEncoder(encoder=encoder, decoder=decoder, output_reconstruction=True))
model.compile(loss='mean_squared_error', optimizer='sgd')
plot(model.predict(x)[0]);
from loss_history import LossHistory
loss_history = LossHistory()
model.fit(x, x, nb_epoch=1000, batch_size=50, callbacks=[loss_history])
plot(model.predict(x)[0])
plot(x[0])
plot(model.predict(x)[10])
plot(x[10])
print('last loss:', loss_history.losses[-1])
plot(loss_history.losses);
imshow(model.get_weights()[0], interpolation='nearest', cmap='gray');
imshow(model.get_weights()[2], interpolation='nearest', cmap='gray');
"""
Explanation: Sinusoid autoencoder trained with multiple phases
Let's provide more training examples - sinusoids with various phases.
End of explanation
"""
x_noised = x + 0.2 * np.random.random(len(x[0]))
plot(x_noised[0], label='input')
plot(model.predict(x_noised)[0], label='predicted')
legend();
"""
Explanation: The model should be able to handle a noise-corrupted input signal.
End of explanation
"""
x_shifted = np.cos(2*np.pi/N * t.reshape(1, -1))
plot(x_shifted[0], label='input')
plot(model.predict(x_shifted)[0], label='predicted')
legend();
"""
Explanation: This time the model should also be able to handle a phase-shifted signal, since it was trained on such examples.
End of explanation
"""
|
sdpython/ensae_teaching_cs
|
_doc/notebooks/2a/cffi_linear_regression.ipynb
|
mit
|
from jyquickhelper import add_notebook_menu
add_notebook_menu()
memo_time = []
import timeit
def unit(x):
if x >= 1: return "%1.2f s" % x
elif x >= 1e-3: return "%1.2f ms" % (x* 1000)
elif x >= 1e-6: return "%1.2f µs" % (x* 1000**2)
elif x >= 1e-9: return "%1.2f ns" % (x* 1000**3)
else:
return "%1.2g s" % x
def timeexe(legend, code, number=100, repeat=1000):
rep = timeit.repeat(code, number=number, repeat=repeat, globals=globals())
ave = sum(rep) / (number * repeat)
std = (sum((x/number - ave)**2 for x in rep) / repeat)**0.5
fir = rep[0]/number
fir3 = sum(rep[:3]) / (3 * number)
las3 = sum(rep[-3:]) / (3 * number)
rep.sort()
mini = rep[len(rep)//20] / number
maxi = rep[-len(rep)//20] / number
print("Moyenne: %s Ecart-type %s (with %d runs) in [%s, %s]" % (
unit(ave), unit(std), number, unit(mini), unit(maxi)))
return dict(legend=legend, average=ave, deviation=std, first=fir, first3=fir3,
last3=las3, repeat=repeat, min5=mini, max5=maxi, code=code, run=number)
"""
Explanation: Code optimization with cffi, numba, cython
The idea is to rewrite a function in C. As an example, we take the prediction function of scikit-learn's linear regression and estimate the speed-up obtained by rewriting the function in a faster language.
End of explanation
"""
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split
diabetes = load_diabetes()
diabetes_X_train, diabetes_X_test, diabetes_y_train, diabetes_y_test = train_test_split(diabetes.data, diabetes.target)
from sklearn.linear_model import LinearRegression
clr = LinearRegression()
clr.fit(diabetes_X_train, diabetes_y_train)
clr.coef_
clr.intercept_
z = diabetes_X_test[0:1,:]
memo_time.append(timeexe("sklearn.predict", "clr.predict(z)"))
%timeit clr.predict(z)
"""
Explanation: Linear regression
End of explanation
"""
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("int linreg(int, double *, double *, double, double *);")
ffibuilder.set_source("_linear_regression",
r"""
static int linreg(int dimension, double * x, double *coef, double intercept, double * out)
{
for(; dimension > 0; --dimension, ++x, ++coef)
intercept += *x * *coef;
*out = intercept;
return 1;
}
""")
ffibuilder.compile(verbose=True)
"""
Explanation: Optimization with cffi
We follow the example Purely for performance (API level, out-of-line).
End of explanation
"""
from _linear_regression import ffi, lib
lib.linreg
"""
Explanation: The compiled function can be accessed as follows.
End of explanation
"""
import numpy
out = numpy.zeros(1)
ptr_coef = clr.coef_.__array_interface__['data'][0]
cptr_coef = ffi.cast ( "double*" , ptr_coef )
x = diabetes_X_test[0:1,:]
ptr_x = x.__array_interface__['data'][0]
cptr_x = ffi.cast ( "double*" , ptr_x )
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast ( "double*" , ptr_out )
n = len(clr.coef_)
lib.linreg(n, cptr_x, cptr_coef, clr.intercept_, cptr_out)
out
"""
Explanation: We follow the example How to pass a Numpy array into a cffi function and how to get one back out?
End of explanation
"""
clr.predict(x)
"""
Explanation: We check that we get the same result.
End of explanation
"""
memo_time.append(timeexe("cffi-linreg", "lib.linreg(n, cptr_x, cptr_coef, clr.intercept_, cptr_out)"))
"""
Explanation: And we measure the execution time:
End of explanation
"""
def predict_clr(x, clr):
out = numpy.zeros(1)
ptr_coef = clr.coef_.__array_interface__['data'][0]
cptr_coef = ffi.cast ( "double*" , ptr_coef )
ptr_x = x.__array_interface__['data'][0]
cptr_x = ffi.cast ( "double*" , ptr_x )
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast ( "double*" , ptr_out )
lib.linreg(len(x), cptr_x, cptr_coef, clr.intercept_, cptr_out)
return out
predict_clr(x, clr)
memo_time.append(timeexe("cffi-linreg-wrapped", "predict_clr(x, clr)"))
"""
Explanation: This is much faster. To be completely fair, we also have to measure the steps that extract the pointers.
End of explanation
"""
res = " + ".join("{0}*x[{1}]".format(c, i) for i, c in enumerate(clr.coef_))
res
code = """
static int linreg_custom(double * x, double * out)
{{
out[0] = {0} + {1};
}}
""".format(clr.intercept_, res)
print(code)
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("int linreg_custom(double *, double *);")
ffibuilder.set_source("_linear_regression_custom", code)
ffibuilder.compile(verbose=True)
from _linear_regression_custom.lib import linreg_custom
linreg_custom(cptr_x, cptr_out)
out
memo_time.append(timeexe("cffi-linreg-custom", "linreg_custom(cptr_x, cptr_out)"))
"""
Explanation: It is still faster.
cffi - second version
Since the function is built dynamically (the code is known at execution time), we can easily get rid of the loop and write the code without a loop, with the coefficients hard-coded.
End of explanation
"""
def predict_clr_custom(x):
out = numpy.zeros(1)
ptr_x = x.__array_interface__['data'][0]
cptr_x = ffi.cast("double*", ptr_x)
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast("double*", ptr_out)
linreg_custom(cptr_x, cptr_out)
return out
predict_clr_custom(x)
memo_time.append(timeexe("cffi-linreg-custom wrapped", "predict_clr_custom(x)"))
"""
Explanation: We gained a factor of 2.
End of explanation
"""
res = " + ".join("{0}f*x[{1}]".format(c, i) for i, c in enumerate(clr.coef_))
res
code = """
static int linreg_custom_float(float * x, float * out)
{{
out[0] = {0}f + {1};
}}
""".format(clr.intercept_, res)
print(code)
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("int linreg_custom_float(float *, float *);")
ffibuilder.set_source("_linear_regression_custom_float", code)
ffibuilder.compile(verbose=True)
from _linear_regression_custom_float.lib import linreg_custom_float
def predict_clr_custom_float(x):
out = numpy.zeros(1, dtype=numpy.float32)
ptr_x = x.__array_interface__['data'][0]
cptr_x = ffi.cast ( "float*" , ptr_x )
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast ( "float*" , ptr_out )
linreg_custom_float(cptr_x, cptr_out)
return out
"""
Explanation: It is a little faster.
And with floats?
The computer distinguishes between doubles, encoded on 64 bits, and floats, encoded on 32 bits. Precision is better in the first case and computations are faster in the second. In machine learning, speed is usually preferred over a small loss of precision, which is often compensated by the optimization inherent to any machine learning problem. What is lost on one observation is recovered on another.
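A quick way to see the trade-off (a small illustration, independent of the timings below):
```
import numpy
print(numpy.dtype(numpy.float64).itemsize, numpy.dtype(numpy.float32).itemsize)  # 8 vs 4 bytes
print(float(numpy.float64(0.1)))   # about 15-16 exact significant digits
print(float(numpy.float32(0.1)))   # only ~7 exact significant digits: 0.10000000149011612
```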
End of explanation
"""
x32 = x.astype(numpy.float32)
predict_clr_custom(x32)
memo_time.append(timeexe("cffi-linreg-custom-float wrapped", "predict_clr_custom(x32)"))
"""
Explanation: Before calling the function, the initial vector must be converted to float32.
End of explanation
"""
out = numpy.zeros(1, dtype=numpy.float32)
ptr_x = x32.__array_interface__['data'][0]
cptr_x = ffi.cast ( "float*" , ptr_x )
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast ( "float*" , ptr_out )
memo_time.append(timeexe("cffi-linreg-custom-float32", "linreg_custom_float(cptr_x, cptr_out)"))
"""
Explanation: The difference is not striking. Let's measure only the C code, even though the Python part cannot be completely avoided.
End of explanation
"""
code = """
#include <xmmintrin.h>
static int linreg_custom_float_simd(float * x, float * out)
{
__m128 c1 = _mm_set_ps(0.3034995490664121f, -237.63931533353392f, 510.5306054362245f, 327.7369804093466f);
__m128 c2 = _mm_set_ps(-814.1317093725389f, 492.81458798373245f, 102.84845219168025f, 184.60648905984064f);
__m128 r1 = _mm_set_ss(152.76430691633442f);
r1 = _mm_add_ss(r1, _mm_mul_ps(c1, _mm_load_ps(x)));
r1 = _mm_add_ss(r1, _mm_mul_ps(c2, _mm_load_ps(x+4)));
float r[4];
_mm_store_ps(r, r1);
out[0] = r[0] + r[1] + r[2] + r[3] + 743.5196167505419f * x[8] + 76.095172216624f * x[9];
return 1;
}
"""
from cffi import FFI
ffibuilder = FFI()
ffibuilder.cdef("int linreg_custom_float_simd(float *, float *);")
ffibuilder.set_source("_linear_regression_custom_float_simd", code)
ffibuilder.compile(verbose=True)
from _linear_regression_custom_float_simd.lib import linreg_custom_float_simd
out = numpy.zeros(1, dtype=numpy.float32)
ptr_x = x32.__array_interface__['data'][0]
cptr_x = ffi.cast ( "float*" , ptr_x )
ptr_out = out.__array_interface__['data'][0]
cptr_out = ffi.cast ( "float*" , ptr_out )
linreg_custom_float_simd(cptr_x, cptr_out)
out
memo_time.append(timeexe("cffi-linreg-custom-float32-simd", "linreg_custom_float_simd(cptr_x, cptr_out)"))
"""
Explanation: The difference is not significant.
SIMD
SIMD is a set of processor instructions that perform element-wise operations on 4 float32 values as fast as on a single one. The processor can only operate on numbers once they have been copied into its registers, so the program spends its time copying numbers from memory to the processor registers and then copying the result back the other way. SIMD instructions save time at the computation level: instead of 4 element-wise multiplications, only one instruction is executed. One just has to know how to use these instructions. With Visual Studio, they are available through the functions Memory and Initialization Using Streaming SIMD Extensions.
The following code is probably not optimal, but it is not too hard to follow.
End of explanation
"""
coef = clr.coef_
list(coef)
code = str(clr.intercept_) + "+" + "+".join("x[{0}]*({1})".format(i, c) for i, c in enumerate(coef))
code
def predict_clr_python(x):
return 152.764306916+x[0]*0.3034995490664121+x[1]*(-237.63931533353392)+x[2]*510.5306054362245+ \
x[3]*327.7369804093466+ \
x[4]*(-814.1317093725389)+x[5]*492.81458798373245+x[6]*102.84845219168025+ \
x[7]*184.60648905984064+x[8]*743.5196167505419+x[9]*76.095172216624
predict_clr_python(x[0])
z = list(x[0])
memo_time.append(timeexe("python-linreg-custom", "predict_clr_python(z)"))
"""
Explanation: It is slightly better. A few references:
aligned_vs_unaligned_load.c: it is code, but easy to read.
How to Write Fast Numerical Code
Processors evolve over time: 4 floats, 8 floats, SIMD2, FMA4 Intrinsics Added for Visual Studio 2010 SP1, AVX.
Pure Python rewrite
We continue with Python only, without numpy.
End of explanation
"""
def predict_clr_python_loop(x, coef, intercept):
return intercept + sum(a*b for a, b in zip(x, coef))
predict_clr_python_loop(x[0], list(clr.coef_), clr.intercept_)
coef = list(clr.coef_)
intercept = clr.intercept_
memo_time.append(timeexe("python-linreg", "predict_clr_python_loop(z, coef, intercept)"))
"""
Explanation: Quite surprisingly, this is rather fast. What if we put a loop in it?
End of explanation
"""
def predict_clr_numpy(x, coef, intercept):
return intercept + numpy.dot(coef, x).sum()
predict_clr_numpy(x[0], clr.coef_, clr.intercept_)
memo_time.append(timeexe("numpy-linreg-numpy", "predict_clr_numpy(z, coef, clr.intercept_)"))
"""
Explanation: Barely any slower.
Rewriting with Python and numpy
End of explanation
"""
from numba import jit
@jit
def predict_clr_numba(x, coef, intercept):
s = intercept
for i in range(0, len(x)):
s += coef[i] * x[i]
return s
predict_clr_numba(z, clr.coef_, clr.intercept_)
memo_time.append(timeexe("numba-linreg-notype", "predict_clr_numba(z, clr.coef_, clr.intercept_)"))
"""
Explanation: The arrays are too small for matrix computation to make a difference. We are back in the cffi case where the Python - C exchanges eat up all the computation time.
numba
numba tries to compile pieces of Python code on the fly. We indicate which function to optimize by decorating it with @jit. Not every construct works; typically, some list comprehensions raise an exception. The code therefore has to be written in Python in a way fairly close to what it would look like in C.
End of explanation
"""
@jit('double(double[:], double[:], double)')
def predict_clr_numba_cast(x, coef, intercept):
s = intercept
for i in range(0, len(x)):
s += coef[i] * x[i]
return s
# The function only works with a numpy.array because the C language is strongly typed.
predict_clr_numba_cast(x[0], clr.coef_, clr.intercept_)
memo_time.append(timeexe("numba-linreg-type", "predict_clr_numba_cast(x[0], clr.coef_, clr.intercept_)"))
"""
Explanation: Rather fast!
End of explanation
"""
@jit('float32(float32[:], float32[:], float32)')
def predict_clr_numba_cast_float(x, coef, intercept):
s = intercept
for i in range(0, len(x)):
s += coef[i] * x[i]
return s
# The function only works with a numpy.array because the C language is strongly typed.
x32 = x[0].astype(numpy.float32)
c32 = clr.coef_.astype(numpy.float32)
i32 = numpy.float32(clr.intercept_)
predict_clr_numba_cast_float(x32, c32, i32)
memo_time.append(timeexe("numba-linreg-type-float32", "predict_clr_numba_cast_float(x32, c32, i32)"))
"""
Explanation: We see that the more information we give the compiler, the more it is able to optimize.
End of explanation
"""
@jit('double(double[:])')
def predict_clr_numba_cast_custom(x):
coef = [ 3.03499549e-01, -2.37639315e+02, 5.10530605e+02, 3.27736980e+02,
-8.14131709e+02, 4.92814588e+02, 1.02848452e+02, 1.84606489e+02,
7.43519617e+02, 7.60951722e+01]
s = 152.76430691633442
for i in range(0, len(x)):
s += coef[i] * x[i]
return s
predict_clr_numba_cast_custom(x[0])
memo_time.append(timeexe("numba-linreg-type-custom", "predict_clr_numba_cast_custom(x[0])"))
"""
Explanation: Let's try with the coefficients hard-coded in the function.
End of explanation
"""
@jit('double(double[:], double[:], double)')
def predict_clr_numba_numpy(x, coef, intercept):
return intercept + numpy.dot(coef, x).sum()
predict_clr_numba_numpy(x[0], clr.coef_, clr.intercept_)
memo_time.append(timeexe("numba-linreg-type-numpy", "predict_clr_numba_numpy(x[0], clr.coef_, clr.intercept_)"))
"""
Explanation: We get close to the times obtained with cffi without wrapping, which means that numba does a much better job at this level than the quickly written wrapper.
End of explanation
"""
%load_ext cython
%%cython
def predict_clr_cython(x, coef, intercept):
s = intercept
for i in range(0, len(x)):
s += coef[i] * x[i]
return s
predict_clr_cython(x[0], clr.coef_, clr.intercept_)
memo_time.append(timeexe("cython-linreg", "predict_clr_cython(x[0], clr.coef_, clr.intercept_)"))
"""
Explanation: numba is less efficient when numpy is involved because the numpy code is not rewritten, it is called.
cython
cython makes it possible to build C extensions on a larger scale than numba. It is the option chosen by scikit-learn. It is better to know C to use it, and here again the goal is to reduce the costly Python / C exchanges.
End of explanation
"""
%%cython
cimport numpy as npc
def predict_clr_cython_type(npc.ndarray[double, ndim=1, mode='c'] x,
npc.ndarray[double, ndim=1, mode='c'] coef,
double intercept):
cdef double s = intercept
for i in range(0, x.shape[0]):
s += coef[i] * x[i]
return s
predict_clr_cython_type(x[0], clr.coef_, clr.intercept_)
memo_time.append(timeexe(
"cython-linreg-type", "predict_clr_cython_type(x[0], clr.coef_, clr.intercept_)"))
"""
Explanation: Cython does worse than numba in our case, and the proposed optimization is quite close to the time already obtained with pure Python. This is because most objects, such as the code behind lists and dictionaries, have already been rewritten in C.
End of explanation
"""
try:
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
import onnxruntime
import onnx
ok_onnx = True
print("onnx, skl2onnx, onnxruntime sont disponibles.")
def save_model(onnx_model, filename):
with open(filename, "wb") as f:
f.write(onnx_model.SerializeToString())
except ImportError as e:
print("La suite requiert onnx, skl2onnx et onnxruntime.")
print(e)
ok_onnx = False
"""
Explanation: The time is almost identical, with a significantly smaller standard deviation.
One last option: ONNX
ONNX is a serialization format that describes a machine learning or deep learning model. It decouples the model from the library that produced it (see ML.net and ONNX).
End of explanation
"""
if ok_onnx:
onnx_model = convert_sklearn(
clr, 'model', [('input', FloatTensorType([None, clr.coef_.shape[0]]))],
target_opset=11)
onnx_model.ir_version = 6
save_model(onnx_model, 'model.onnx')
model_onnx = onnx.load('model.onnx')
print("Modèle sérialisé au format ONNX")
print(model_onnx)
else:
print("onnx, onnxmltools, onnxruntime sont disponibles.")
"""
Explanation: We convert the model to the ONNX format.
End of explanation
"""
if ok_onnx:
sess = onnxruntime.InferenceSession("model.onnx")
for i in sess.get_inputs():
print('Input:', i)
for o in sess.get_outputs():
print('Output:', o)
def predict_onnxrt(x):
return sess.run(["variable"], {'input': x})
print("Prediction:", predict_onnxrt(x.astype(numpy.float32)))
if ok_onnx:
x32 = x.astype(numpy.float32)
memo_time.append(timeexe("onnxruntime-float32", "predict_onnxrt(x32)"))
memo_time.append(timeexe("onnxruntime-float64", "predict_onnxrt(x.astype(numpy.float32))"))
"""
Explanation: We compute the predictions. The [onnxruntime](https://docs.microsoft.com/en-us/python/api/overview/azure/onnx/intro?view=azure-onnx-py) module optimizes computations for deep learning models, which explains why all computations are done with 4-byte floats, numpy.float32.
End of explanation
"""
import pandas
df = pandas.DataFrame(data=memo_time)
df = df.set_index("legend").sort_values("average")
df
"""
Explanation: Summary
End of explanation
"""
cols = ["average", "deviation", "min5", "max5", "run", "code"]
df[cols]
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(14,6))
df[["average", "deviation"]].plot(kind="barh", logx=True, ax=ax, xerr="deviation",
legend=False, fontsize=12, width=0.8)
ax.set_ylabel("")
ax.grid(b=True, which="major")
ax.grid(b=True, which="minor");
"""
Explanation: We drop a few columns and recall the legend:
cffi: optimized with cffi
custom: no loop, but the function can only predict one specific linear regression
float32: uses float instead of double
linreg: linear regression
numba: optimized with numba
numpy: optimized with numpy
python: no C, pure Python
simd: optimized with SIMD instructions
sklearn: the sklearn predict function
static: the function uses static variables
type: the function is typed and only works with one specific input type
wrapped: optimized code wrapped in a plain Python function that is not optimized (the containers are recreated on every call)
End of explanation
"""
def predict_clr_python_loop_multi(x, coef, intercept):
    # A two-dimensional array is expected.
res = numpy.zeros((x.shape[0], 1))
res[:, 0] = intercept
for i in range(0, x.shape[0]):
res[i, 0] += sum(a*b for a, b in zip(x[i, :], coef))
return res
predict_clr_python_loop_multi(diabetes_X_test[:2], clr.coef_, clr.intercept_)
def predict_clr_numpy_loop_multi(x, coef, intercept):
    # A two-dimensional array is expected.
res = numpy.ones((x.shape[0], 1)) * intercept
res += x @ coef.reshape((len(coef), 1))
return res
predict_clr_numpy_loop_multi(diabetes_X_test[:2], clr.coef_, clr.intercept_)
def predict_clr_numba_cast_multi(X, coef, intercept):
return [predict_clr_numba_cast(x, coef, intercept) for x in X]
predict_clr_numba_cast_multi(diabetes_X_test[:2], clr.coef_, clr.intercept_)
def predict_clr_cython_type_multi(X, coef, intercept):
return [predict_clr_cython_type(x, coef, intercept) for x in X]
predict_clr_cython_type_multi(diabetes_X_test[:2], clr.coef_, clr.intercept_)
memo = []
batch = [1, 10, 100, 200, 500, 1000, 2000, 3000, 4000, 5000, 10000,
20000, 50000, 75000, 100000, 150000, 200000, 300000, 400000,
500000, 600000]
number = 10
for i in batch:
if i <= diabetes_X_test.shape[0]:
mx = diabetes_X_test[:i]
else:
mxs = [diabetes_X_test] * (i // diabetes_X_test.shape[0] + 1)
mx = numpy.vstack(mxs)
mx = mx[:i]
print("batch", "=", i)
repeat=20 if i >= 5000 else 100
memo.append(timeexe("sklearn.predict %d" % i, "clr.predict(mx)",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "sklearn"
if i <= 1000:
        # very slow
memo.append(timeexe("python %d" % i, "predict_clr_python_loop_multi(mx, clr.coef_, clr.intercept_)",
repeat=20, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "python"
memo.append(timeexe("numpy %d" % i, "predict_clr_numpy_loop_multi(mx, clr.coef_, clr.intercept_)",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "numpy"
if i <= 10000:
        # very slow
memo.append(timeexe("numba %d" % i, "predict_clr_numba_cast_multi(mx, clr.coef_, clr.intercept_)",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "numba"
if i <= 1000:
        # very slow
memo.append(timeexe("cython %d" % i, "predict_clr_cython_type_multi(mx, clr.coef_, clr.intercept_)",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "cython"
if ok_onnx:
memo.append(timeexe("onnxruntime %d" % i, "predict_onnxrt(mx.astype(numpy.float32))",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "onnxruntime"
dfb = pandas.DataFrame(memo)[["average", "lib", "batch"]]
piv = dfb.pivot("batch", "lib", "average")
piv
for c in piv.columns:
piv["ave_" + c] = piv[c] / piv.index
piv
libs = list(c for c in piv.columns if "ave_" in c)
ax = piv.plot(y=libs, logy=True, logx=True, figsize=(10, 5))
ax.set_title("Evolution du temps de prédiction selon la taille du batch")
ax.grid(True);
"""
Explanation: This comparison is missing the GPU, but that is a bit more complex to set up: it requires a GPU card, and parallelization would not bring much given the low dimension of the problem.
One-off prediction and measurement bias
The previous graph shows that scikit-learn's predict function is the slowest. The first reason is that this code works for every linear regression, whereas all the other functions are specialized for a single model. The second reason is that scikit-learn's code is optimized to compute many predictions at once, whereas all the other functions compute only one (the so-called one-off scenario). We compare with what a purely Python version and a numpy version would give.
End of explanation
"""
from pyquickhelper.helpgen import NbImage
NbImage("pycpp.png")
"""
Explanation: The minimum obtained is around $10^{-8} s$, i.e. 10 ns. This shows that the previous comparison was incomplete, even biased. Everything depends on how the prediction function is used, although it will always be possible to write specialized code that is faster than any generic function. In general, the more we stay on the Python side, the slower the program. The number of crossings from one side to the other, depending on how they are done, also slows things down. Taking this into account, the red program will be slower than the green one.
End of explanation
"""
from sklearn.datasets import load_diabetes
diabetes = load_diabetes()
diabetes_X_train = diabetes.data[:-20]
diabetes_X_test = diabetes.data[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=10)
rf.fit(diabetes_X_train, diabetes_y_train)
memo_time = []
x = diabetes_X_test[:1]
memo_time.append(timeexe("sklearn-rf", "rf.predict(x)", repeat=100, number=20))
"""
Explanation: These results are generally rather volatile because the computation time is wrapped in several Python functions, which makes a precise measurement difficult. They nevertheless give a good idea of the orders of magnitude.
Random Forest
We reproduce the same results for a random forest, but rewriting the prediction by hand is no longer as simple as for a linear regression.
One prediction at a time
End of explanation
"""
if ok_onnx:
onnxrf_model = convert_sklearn(
rf, 'model', [('input', FloatTensorType([None, clr.coef_.shape[0]]))],
target_opset=11)
onnxrf_model.ir_version = 6
save_model(onnxrf_model, 'model_rf.onnx')
model_onnx = onnx.load('model_rf.onnx')
if ok_onnx:
sess = onnxruntime.InferenceSession("model_rf.onnx")
for i in sess.get_inputs():
print('Input:', i)
for o in sess.get_outputs():
print('Output:', o)
def predict_onnxrt_rf(x):
return sess.run(["variable"], {'input': x})
print(predict_onnxrt_rf(x.astype(numpy.float32)))
memo_time.append(timeexe("onnx-rf", "predict_onnxrt_rf(x.astype(numpy.float32))",
repeat=100, number=20))
"""
Explanation: This is much slower than the linear regression. Let's try with onnx.
End of explanation
"""
import pandas
df2 = pandas.DataFrame(data=memo_time)
df2 = df2.set_index("legend").sort_values("average")
df2
fig, ax = plt.subplots(1, 1, figsize=(14,4))
df2[["average", "deviation"]].plot(kind="barh", logx=True, ax=ax, xerr="deviation",
legend=False, fontsize=12, width=0.8)
ax.set_ylabel("")
ax.grid(b=True, which="major")
ax.grid(b=True, which="minor");
"""
Explanation: This is much faster.
End of explanation
"""
memo = []
batch = [1, 10, 100, 200, 500, 1000, 2000, 3000, 4000, 5000, 10000,
20000, 50000, 75000, 100000, 150000, 200000, 300000, 400000,
500000, 600000]
number = 10
repeat = 10
for i in batch[:15]:
if i <= diabetes_X_test.shape[0]:
mx = diabetes_X_test[:i]
else:
mxs = [diabetes_X_test] * (i // diabetes_X_test.shape[0] + 1)
mx = numpy.vstack(mxs)
mx = mx[:i]
print("batch", "=", i)
memo.append(timeexe("sklearn.predict %d" % i, "rf.predict(mx)",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "sklearn"
if ok_onnx:
memo.append(timeexe("onnxruntime %d" % i,
"predict_onnxrt_rf(mx.astype(numpy.float32))",
repeat=repeat, number=number))
memo[-1]["batch"] = i
memo[-1]["lib"] = "onnxruntime"
dfbrf = pandas.DataFrame(memo)[["average", "lib", "batch"]]
pivrf = dfbrf.pivot("batch", "lib", "average")
for c in pivrf.columns:
pivrf["ave_" + c] = pivrf[c] / pivrf.index
libs = list(c for c in pivrf.columns if "ave_" in c)
ax = pivrf.plot(y=libs, logy=True, logx=True, figsize=(10, 5))
ax.set_title("Evolution du temps de prédiction selon la taille du batch\nrandom forest")
ax.grid(True);
"""
Explanation: Batch prediction
End of explanation
"""
|
turbomanage/training-data-analyst
|
courses/machine_learning/deepdive2/text_classification/labs/reusable_embeddings.ipynb
|
apache-2.0
|
import os
from google.cloud import bigquery
import pandas as pd
%load_ext google.cloud.bigquery
"""
Explanation: Reusable Embeddings
Learning Objectives
1. Learn how to use a pre-trained TF Hub text modules to generate sentence vectors
1. Learn how to incorporate a pre-trained TF-Hub module into a Keras model
1. Learn how to deploy and use a text model on CAIP
Introduction
In this notebook, we will implement text models to recognize the probable source (Github, Tech-Crunch, or The New-York Times) of the titles we have in the title dataset.
First, we will load and pre-process the texts and labels so that they are suitable to be fed to sequential Keras models whose first layer is a pre-trained TF-Hub module. Thanks to this first layer, we won't need to tokenize and integerize the text before passing it to our models; the pre-trained layer will take care of that for us and consume raw text directly. However, we will still have to one-hot-encode each of the 3 classes into a 3-dimensional basis vector.
Then we will build, train and compare simple DNN models starting with different pre-trained TF-Hub layers.
End of explanation
"""
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # Replace with your REGION
SEED = 0
"""
Explanation: Replace the variable values in the cell below:
End of explanation
"""
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
"""
Explanation: Create a Dataset from BigQuery
Hacker news headlines are available as a BigQuery public dataset. The dataset contains all headlines from the site's inception in October 2006 until October 2015.
Here is a sample of the dataset:
End of explanation
"""
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
"""
Explanation: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http://mobile.nytimes.com/...., I want to be left with <i>nytimes</i>
End of explanation
"""
regex = '.*://(.[^/]+)/'
sub_query = """
SELECT
title,
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
AND LENGTH(title) > 10
""".format(regex)
query = """
SELECT
LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
source
FROM
({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
""".format(sub_query=sub_query)
print(query)
"""
Explanation: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
End of explanation
"""
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
"""
Explanation: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
End of explanation
"""
print("The full dataset contains {n} titles".format(n=len(title_dataset)))
"""
Explanation: AutoML for text classification requires that
* the dataset be in csv form with
* the first column being the texts to classify or a GCS path to the text
* the last column being the text labels
The dataset we pulled from BigQuery satisfies these requirements.
End of explanation
"""
title_dataset.source.value_counts()
"""
Explanation: Let's make sure we have roughly the same number of labels for each of our three labels:
End of explanation
"""
DATADIR = './data/'
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = 'titles_full.csv'
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding='utf-8')
"""
Explanation: Finally we will save our data, which is currently in-memory, to disk.
We will create a csv file containing the full dataset and another containing only 1000 articles for development.
Note: It may take a long time to train AutoML on the full dataset, so we recommend using the sample dataset while you are learning the tool.
End of explanation
"""
sample_title_dataset = title_dataset.sample(n=1000)
sample_title_dataset.source.value_counts()
"""
Explanation: Now let's sample 1000 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
End of explanation
"""
SAMPLE_DATASET_NAME = 'titles_sample.csv'
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding='utf-8')
import datetime
import os
import shutil
import pandas as pd
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
from tensorflow_hub import KerasLayer
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
print(tf.__version__)
%matplotlib inline
"""
Explanation: Let's write the sample dataset to disk.
End of explanation
"""
MODEL_DIR = "./text_models"
DATA_DIR = "./data"
"""
Explanation: Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located:
End of explanation
"""
ls ./data/
DATASET_NAME = "titles_full.csv"
TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME)
COLUMNS = ['title', 'source']
titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS)
titles_df.head()
"""
Explanation: Loading the dataset
As in the previous labs, our dataset consists of titles of articles along with a label indicating which source these articles have been taken from (GitHub, Tech-Crunch, or the New-York Times):
End of explanation
"""
titles_df.source.value_counts()
"""
Explanation: Let's look again at the number of examples per label to make sure we have a well-balanced dataset:
End of explanation
"""
CLASSES = {
'github': 0,
'nytimes': 1,
'techcrunch': 2
}
N_CLASSES = len(CLASSES)
def encode_labels(sources):
classes = [CLASSES[source] for source in sources]
one_hots = to_categorical(classes, num_classes=N_CLASSES)
return one_hots
encode_labels(titles_df.source[:4])
"""
Explanation: Preparing the labels
In this lab, we will use pre-trained TF-Hub embedding modules for English as the first layer of our models. One immediate
advantage of doing so is that the TF-Hub embedding module will take care of processing the raw text for us.
This also means that our model will be able to consume text directly instead of sequences of integers representing the words.
However, as before, we still need to preprocess the labels into one-hot-encoded vectors:
End of explanation
"""
N_TRAIN = int(len(titles_df) * 0.95)
titles_train, sources_train = (
titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN])
titles_valid, sources_valid = (
titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:])
"""
Explanation: Preparing the train/test splits
Let's split our data into train and test splits:
End of explanation
"""
sources_train.value_counts()
sources_valid.value_counts()
"""
Explanation: To be on the safe side, we verify that the train and test splits
have roughly the same number of examples per class.
Since that is the case, accuracy will be a good metric for measuring
the performance of our models.
End of explanation
"""
X_train, Y_train = titles_train.values, encode_labels(sources_train)
X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid)
X_train[:3]
Y_train[:3]
"""
Explanation: Now let's create the features and labels we will feed our models with:
End of explanation
"""
NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2"
nnlm_module = KerasLayer(# TODO)
"""
Explanation: NNLM Model
We will first try a word embedding pre-trained using a Neural Probabilistic Language Model. TF-Hub has a 50-dimensional one called
nnlm-en-dim50-with-normalization, which also
normalizes the vectors produced.
Lab Task 1a: Import NNLM TF Hub module into KerasLayer
Once loaded from its url, the TF-hub module can be used as a normal Keras layer in a sequential or functional model. Since we have enough data to fine-tune the parameters of the pre-trained embedding itself, we will set trainable=True in the KerasLayer that loads the pre-trained embedding:
End of explanation
"""
nnlm_module(tf.constant([# TODO]))
"""
Explanation: Note that this TF-Hub embedding produces a single 50-dimensional vector when passed a sentence:
Lab Task 1b: Use module to encode a sentence string
End of explanation
"""
SWIVEL = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim-with-oov/1"
swivel_module = KerasLayer(# TODO)
"""
Explanation: Swivel Model
Then we will try a word embedding obtained using Swivel, an algorithm that essentially factorizes word co-occurrence matrices to create the words embeddings.
TF-Hub hosts the pretrained gnews-swivel-20dim-with-oov 20-dimensional Swivel module.
Lab Task 1c: Import Swivel TF Hub module into KerasLayer
End of explanation
"""
swivel_module(tf.constant([# TODO]))
"""
Explanation: Similarly as the previous pre-trained embedding, it outputs a single vector when passed a sentence:
Lab Task 1d: Use module to encode a sentence string
End of explanation
"""
def build_model(hub_module, name):
model = Sequential([
# TODO
Dense(16, activation='relu'),
Dense(N_CLASSES, activation='softmax')
], name=name)
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy']
)
return model
"""
Explanation: Building the models
Let's write a function that
takes as input an instance of a KerasLayer (i.e. the swivel_module or the nnlm_module we constructed above) as well as the name of the model (say swivel or nnlm)
returns a compiled Keras sequential model starting with this pre-trained TF-hub layer, adding one or more dense relu layers to it, and ending with a softmax layer giving the probability of each of the classes:
Lab Task 2: Incorporate a pre-trained TF Hub module as first layer of Keras Sequential Model
End of explanation
"""
def train_and_evaluate(train_data, val_data, model, batch_size=5000):
X_train, Y_train = train_data
tf.random.set_seed(33)
model_dir = os.path.join(MODEL_DIR, model.name)
if tf.io.gfile.exists(model_dir):
tf.io.gfile.rmtree(model_dir)
history = model.fit(
X_train, Y_train,
epochs=100,
batch_size=batch_size,
validation_data=val_data,
callbacks=[EarlyStopping(), TensorBoard(model_dir)],
)
return history
"""
Explanation: Let's also wrap the training code into a train_and_evaluate function that
* takes as input the training and validation data, as well as the compiled model itself, and the batch_size
* trains the compiled model for 100 epochs at most, and does early-stopping when the validation loss is no longer decreasing
returns a history object, which will help us to plot the learning curves
End of explanation
"""
data = (X_train, Y_train)
val_data = (X_valid, Y_valid)
nnlm_model = build_model(nnlm_module, 'nnlm')
nnlm_history = train_and_evaluate(data, val_data, nnlm_model)
history = nnlm_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
"""
Explanation: Training NNLM
End of explanation
"""
swivel_model = build_model(swivel_module, name='swivel')
swivel_history = train_and_evaluate(data, val_data, swivel_model)
history = swivel_history
pd.DataFrame(history.history)[['loss', 'val_loss']].plot()
pd.DataFrame(history.history)[['accuracy', 'val_accuracy']].plot()
"""
Explanation: Training Swivel
End of explanation
"""
OUTPUT_DIR = "./savedmodels"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, 'swivel')
os.environ['EXPORT_PATH'] = EXPORT_PATH
shutil.rmtree(EXPORT_PATH, ignore_errors=True)
tf.saved_model.save(swivel_model, EXPORT_PATH)
"""
Explanation: Swivel trains faster but reaches a lower validation accuracy, and it needs more epochs to get there.
Deploying the model
The first step is to serialize one of our trained Keras models as a SavedModel:
End of explanation
"""
%%bash
# TODO 5
PROJECT=# TODO: Change this to your PROJECT
BUCKET=${PROJECT}
REGION=us-east1
MODEL_NAME=title_model
VERSION_NAME=swivel
EXPORT_PATH=$EXPORT_PATH
if [[ $(gcloud ai-platform models list --format='value(name)' | grep $MODEL_NAME) ]]; then
echo "$MODEL_NAME already exists"
else
echo "Creating $MODEL_NAME"
gcloud ai-platform models create --regions=$REGION $MODEL_NAME
fi
if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep $VERSION_NAME) ]]; then
echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... "
echo yes | gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME
echo "Please run this cell again if you don't see a Creating message ... "
sleep 2
fi
echo "Creating $MODEL_NAME:$VERSION_NAME"
gcloud beta ai-platform versions create $VERSION_NAME\
--model=$MODEL_NAME \
--framework=# TODO \
--python-version=# TODO \
--runtime-version=1.15 \
--origin=# TODO \
--staging-bucket=# TODO\
--machine-type n1-standard-4
"""
Explanation: Then we can deploy the model using the gcloud CLI as before:
Lab Task 3a: Complete the following script to deploy the swivel model
End of explanation
"""
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
"""
Explanation: Before we try our deployed model, let's inspect its signature to know what to send to the deployed API:
End of explanation
"""
%%writefile input.json
{# TODO}
!gcloud ai-platform predict \
--model title_model \
--json-instances input.json \
--version swivel
"""
Explanation: Let's go ahead and hit our model:
Lab Task 3b: Create the JSON object to send a title to the API you just deployed
(Hint: Look at the 'saved_model_cli show' command output above.)
End of explanation
"""
|
luofan18/deep-learning
|
tv-script-generation/dlnd_tv_script_generation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
"""
Explanation: TV Script Generation
In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern.
Get the Data
The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc..
End of explanation
"""
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
"""
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
"""
# TODO: Implement Function
vocabs = set(text)
vocab_to_int = {word: i for i, word in enumerate(vocabs)}
int_to_vocab = {i: word for i, word in enumerate(vocabs)}
return vocab_to_int, int_to_vocab
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
"""
Explanation: Implement Preprocessing Functions
The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below:
- Lookup Table
- Tokenize Punctuation
Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call vocab_to_int
- Dictionary to go from the id to word, we'll call int_to_vocab
Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab)
End of explanation
"""
def token_lookup():
"""
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
"""
# TODO: Implement Function
return {
'.': '||period||',
',': '||comma||',
'"': '||quotationMark||',
';': '||semicolon||',
'!': '||exclamationMark||',
'?': '||questionMark||',
'(': '||leftParentheses||',
')': '||rightParentheses||',
'--': '||dash||',
'\n': '||return||'
}
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
"""
Explanation: Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuation marks like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!".
Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( . )
- Comma ( , )
- Quotation Mark ( " )
- Semicolon ( ; )
- Exclamation mark ( ! )
- Question mark ( ? )
- Left Parentheses ( ( )
- Right Parentheses ( ) )
- Dash ( -- )
- Return ( \n )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around them. This separates each symbol into its own word, making it easier for the neural network to predict the next word. Make sure you don't use a token that could be confused with a word. Instead of using the token "dash", try using something like "||dash||".
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a RNN by implementing the following functions below:
- get_inputs
- get_init_cell
- get_embed
- build_rnn
- build_nn
- get_batches
Check the Version of TensorFlow and Access to GPU
End of explanation
"""
def get_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
"""
# TODO: Implement Function
input_ = tf.placeholder(tf.int32, (None, None), name='input')
targets = tf.placeholder(tf.int32, (None, None), name='targets')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
return input_, targets, learning_rate
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_inputs(get_inputs)
"""
Explanation: Input
Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
- Input text placeholder named "input" using the TF Placeholder name parameter.
- Targets placeholder
- Learning Rate placeholder
Return the placeholders in the following tuple (Input, Targets, LearningRate)
End of explanation
"""
def get_init_cell(batch_size, rnn_size):
"""
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
"""
# TODO: Implement Function
lstm_layers = 2
def make_cell(rnn_size):
return tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for i in range(lstm_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
initial_state = tf.identity(initial_state, name='initial_state')
return cell, initial_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_init_cell(get_init_cell)
"""
Explanation: Build RNN Cell and Initialize
Stack one or more BasicLSTMCells in a MultiRNNCell.
- The RNN size should be set using rnn_size
- Initialize the Cell State using the MultiRNNCell's zero_state() function
- Apply the name "initial_state" to the initial state using tf.identity()
Return the cell and initial state in the following tuple (Cell, InitialState)
End of explanation
"""
def get_embed(input_data, vocab_size, embed_dim):
"""
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
"""
# TODO: Implement Function
with tf.name_scope(name='embedding'):
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1, name='embeding_w'))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_embed(get_embed)
"""
Explanation: Word Embedding
Apply embedding to input_data using TensorFlow. Return the embedded sequence.
End of explanation
"""
def build_rnn(cell, inputs):
"""
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
"""
# TODO: Implement Function
rnn_out, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return rnn_out, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_rnn(build_rnn)
"""
Explanation: Build RNN
You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN.
- Build the RNN using the tf.nn.dynamic_rnn()
- Apply the name "final_state" to the final state using tf.identity()
Return the outputs and final_state state in the following tuple (Outputs, FinalState)
End of explanation
"""
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
"""
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
"""
# TODO: Implement Function
embed = get_embed(input_data, vocab_size, embed_dim)
rnn_out, final_state = build_rnn(cell, embed)
out = tf.layers.dense(rnn_out, vocab_size, activation=None)
return out, final_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_build_nn(build_nn)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
- Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function.
- Build RNN using cell and your build_rnn(cell, inputs) function.
- Apply a fully connected layer with a linear activation and vocab_size as the number of outputs.
Return the logits and final state in the following tuple (Logits, FinalState)
End of explanation
"""
def get_batches(int_text, batch_size, seq_length):
"""
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
"""
# TODO: Implement Function
batch_nums = len(int_text) // (batch_size * seq_length)
train_text = int_text[:batch_nums*batch_size*seq_length]
target_text = np.zeros_like(train_text)
target_text[:-1] = int_text[1:batch_nums*batch_size*seq_length]
target_text[-1] = int_text[0]
train_text = np.array(train_text)
target_text = np.array(target_text)
train_batch = np.reshape(train_text, (batch_nums, batch_size, seq_length))
target_batch = np.reshape(target_text, (batch_nums, batch_size, seq_length))
batches = np.zeros((batch_nums, 2, batch_size, seq_length))
for num_i in range(batch_nums):
# fill in data
for num_j in range(batch_size):
idx = num_i * batch_size + num_j
batches[idx%batch_nums, 0, idx//batch_nums] = train_batch[num_i, num_j]
batches[idx%batch_nums, 1, idx//batch_nums] = target_batch[num_i, num_j]
return batches
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_batches(get_batches)
# print (get_batches(np.array(range(1, 19)), 3, 2))
"""
Explanation: Batches
Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements:
- The first element is a single batch of input with the shape [batch size, sequence length]
- The second element is a single batch of targets with the shape [batch size, sequence length]
If you can't fill the last batch with enough data, drop the last batch.
For example, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following:
```
[
# First Batch
[
# Batch of Input
[[ 1 2], [ 7 8], [13 14]]
# Batch of targets
[[ 2 3], [ 8 9], [14 15]]
]
# Second Batch
[
# Batch of Input
[[ 3 4], [ 9 10], [15 16]]
# Batch of targets
[[ 4 5], [10 11], [16 17]]
]
# Third Batch
[
# Batch of Input
[[ 5 6], [11 12], [17 18]]
# Batch of targets
[[ 6 7], [12 13], [18 1]]
]
]
```
Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive.
End of explanation
"""
# Number of Epochs
num_epochs = 1000
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 256
# Sequence Length
seq_length = 64
# Learning Rate
learning_rate = 0.001
# Show stats for every n number of batches
show_every_n_batches = 32
tf.reset_default_graph()
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
save_dir = './save'
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set num_epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set embed_dim to the size of the embedding.
Set seq_length to the length of sequence.
Set learning_rate to the learning rate.
Set show_every_n_batches to the number of batches the neural network should print progress.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
"""
Explanation: Save Parameters
Save seq_length and save_dir for generating a new TV script.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def get_tensors(loaded_graph):
"""
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
"""
# TODO: Implement Function
return (
loaded_graph.get_tensor_by_name('input:0'),
loaded_graph.get_tensor_by_name('initial_state:0'),
loaded_graph.get_tensor_by_name('final_state:0'),
loaded_graph.get_tensor_by_name('probs:0')
)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_get_tensors(get_tensors)
"""
Explanation: Implement Generate Functions
Get Tensors
Get tensors from loaded_graph using the function get_tensor_by_name(). Get the tensors using the following names:
- "input:0"
- "initial_state:0"
- "final_state:0"
- "probs:0"
Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
End of explanation
"""
def pick_word(probabilities, int_to_vocab):
"""
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
"""
# TODO: Implement Function
return int_to_vocab[np.random.choice(range(len(int_to_vocab)), p=probabilities)]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_pick_word(pick_word)
"""
Explanation: Choose Word
Implement the pick_word() function to select the next word using probabilities.
End of explanation
"""
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
"""
Explanation: Generate TV Script
This will generate the TV script for you. Set gen_length to the length of TV script you want to generate.
End of explanation
"""
|
GoogleCloudPlatform/ml-design-patterns
|
02_data_representation/text_embeddings.ipynb
|
apache-2.0
|
import tensorflow as tf
import tensorflow_hub as tfhub
model = tf.keras.Sequential()
model.add(tfhub.KerasLayer("https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1",
output_shape=[20], input_shape=[], dtype=tf.string))
model.summary()
model.predict(["""
Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially. At the stroke of the midnight hour, when the world sleeps, India will awake to life and freedom.
A moment comes, which comes but rarely in history, when we step out from the old to the new -- when an age ends, and when the soul of a nation, long suppressed, finds utterance.
"""])
"""
Explanation: Document embeddings in BigQuery
This notebook shows how to do use a pre-trained embedding as a vector representation of a natural language text column.
Given this embedding, we can use it in machine learning models.
Embedding model for documents
We're going to use a model that has been pretrained on Google News. Here's an example of how it works in Python. We will use it directly in BigQuery, however.
End of explanation
"""
%%bash
BUCKET=ai-analytics-solutions-kfpdemo # CHANGE AS NEEDED
rm -rf tmp
mkdir tmp
FILE=swivel.tar.gz
wget --quiet -O tmp/swivel.tar.gz https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1?tf-hub-format=compressed
cd tmp
tar xvfz swivel.tar.gz
cd ..
mv tmp swivel
gsutil -m cp -R swivel gs://${BUCKET}/swivel
rm -rf swivel
echo "Model artifacts are now at gs://${BUCKET}/swivel/*"
"""
Explanation: Loading model into BigQuery
The Swivel model above is already available in SavedModel format. But we need it on Google Cloud Storage before we can load it into BigQuery.
End of explanation
"""
%%bigquery
CREATE OR REPLACE MODEL advdata.swivel_text_embed
OPTIONS(model_type='tensorflow', model_path='gs://ai-analytics-solutions-kfpdemo/swivel/*')
"""
Explanation: Let's load the model into a BigQuery dataset named advdata (create it if necessary)
End of explanation
"""
%%bigquery
SELECT output_0 FROM
ML.PREDICT(MODEL advdata.swivel_text_embed,(
SELECT "Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially." AS sentences))
"""
Explanation: From the BigQuery web console, click on "schema" tab for the newly loaded model. We see that the input is called sentences and the output is called output_0:
<img src="swivel_schema.png" />
End of explanation
"""
%%bigquery
CREATE OR REPLACE TABLE advdata.comments_embedding AS
SELECT
output_0 as comments_embedding,
comments
FROM ML.PREDICT(MODEL advdata.swivel_text_embed,(
SELECT comments, LOWER(comments) AS sentences
FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports`
))
"""
Explanation: Create lookup table
Let's create a lookup table of embeddings. We'll use the comments field of a storm reports table from NOAA.
This is an example of the Feature Store design pattern.
End of explanation
"""
|
usantamaria/iwi131
|
ipynb/22-Actividad-DiccionariosYConjuntos/Actividad4.ipynb
|
cc0-1.0
|
# Load data
miembros_fsociety= {
'Eliott': [(2015, 10, 20), 'Seguridad', 'New York',
{'Darlene'}],
'Darlene': [(2013, 3, 22), 'Malware', 'New York',
{'Eliott', 'Cisco'}],
'Cisco': [(2012, 2, 4), 'Sistemas Distribuidos', 'San Francisco',
{'Darlene', 'Romero', 'Eliott'}],
'Mr. Robot': [(2003, 5, 3), 'Analisis de Datos', 'New York',
{'Eliott', 'Darlene', 'Romero', 'Cisco'}]
# ...
}
"""
Explanation: <header class="w3-container w3-teal">
<img src="images/utfsm.png" alt="" align="left"/>
<img src="images/inf.png" alt="" align="right"/>
</header>
<br/><br/><br/><br/><br/>
IWI131
Computer Programming
Sebastián Flores
http://progra.usm.cl/
https://www.github.com/usantamaria/iwi131
Groups
ALMEIDA, CURIHUAL, ARRATIA
ALVARADO, ETEROVIC, CANALES
GALLARDO, REQUENA, CODDOU
PROBOSTE, REYES, SANDOVAL
SCHWERTER, ZEGERS, TORREBLANCA
CATALAN, CHAURA, SALAS
COLLAO, ESTRADA, BAHAMONDE
DURÁN, MORALES,CANTILLANA
GRUNERT, KANZUA, CASTRO
LOPEZ, LOPEZ, ESTAY
MEDIANO, LOBOS, FLORES
OGALDE, CARRIEL, HERRERA
SANTELICES, PÉREZ
CASTILLO, ARÉVALO
Review of Activity 4 - Dictionaries and Sets
Problem statement
The leader of the hacker group 'fsociety' stores the information about its members in a dictionary with the following structure: apodo_miembro: [fecha_ingreso_al_grupo, experticia, ciudad_de_residencia, cjto_conocidos].
The set cjto_conocidos lists all the members whose true identity the hacker knows: {miembro_1, miembro_2, ..., miembro_n}. The fact that one member knows the true identity of another member does not mean that the other member knows the identity of the first.
As an example, consider the following structure:
End of explanation
"""
# Structure: apodo_miembro: [fecha_ingreso, experticia, ciudad, cjto_conocidos]
def miembro_con_mas_conocidos(miembros):
n_mas_conocidos = 0
apodo_mas_conocidos = ""
for apodo, datos in miembros.items():
n_conocidos = len(datos[-1])
if n_conocidos>n_mas_conocidos:
n_mas_conocidos = n_conocidos
apodo_mas_conocidos = apodo
return apodo_mas_conocidos
print miembro_con_mas_conocidos(miembros_fsociety)
"""
Explanation: Question 1
We need to find the nickname of the member with the largest number of known members.
End of explanation
"""
# Structure: apodo_miembro: [fecha_ingreso, experticia, ciudad, cjto_conocidos]
def actualizar_miembros(miembros, apodo_viejo, apodo_nuevo, ciudad_nueva):
for apodo, datos in miembros.items():
        # Replace every occurrence of the old nickname
cjto_conocidos = datos[-1]
if apodo_viejo in cjto_conocidos:
cjto_conocidos.remove(apodo_viejo)
cjto_conocidos.add(apodo_nuevo)
        # Update the member's own record
if apodo==apodo_viejo:
fecha, experticia, ciudad_vieja, cjto_conocidos = datos
dato_nuevo = (fecha, experticia, ciudad_nueva, cjto_conocidos)
del miembros[apodo_viejo]
miembros[apodo_nuevo] = dato_nuevo
return miembros
for key, val in miembros_fsociety.items():
print key, ":", val
print ""
miembros_fsociety = actualizar_miembros(miembros_fsociety, "Darlene", "Leia", "Valparaiso")
for key, val in miembros_fsociety.items():
print key, ":", val
"""
Explanation: Question 2
When a member of the group is in danger of being discovered, that member's record must be deleted, their nickname replaced
with a new one, and they must be relocated to another city. Remember to update
the whole dictionary.
End of explanation
"""
# Structure: apodo_miembro: [fecha_ingreso, experticia, ciudad, cjto_conocidos]
def miembros_segun_fecha(miembros, fecha_min, fecha_max):
miembros_en_fechas = []
for apodo, datos in miembros.items():
fecha, experticia, ciudad_vieja, cjto_conocidos = datos
if fecha_min <= fecha <= fecha_max:
miembros_en_fechas.append(apodo)
return miembros_en_fechas
fecha_min = (2000, 1, 1)
fecha_max = (2016, 1, 1)
print miembros_segun_fecha(miembros_fsociety, fecha_min, fecha_max)
fecha_min = (2012, 2, 5)
fecha_max = (2015, 10, 20)
print miembros_segun_fecha(miembros_fsociety, fecha_min, fecha_max)
"""
Explanation: Question 3
We need to find the names of all the members who joined the organization
between two dates (inclusive).
End of explanation
"""
|
CtheDataIO-sdpenaloza/Kaggle-Titanic-Machine-Learning-from-Disaster
|
Manual-Titanic/Kaggle - Titanic - Manual.ipynb
|
gpl-3.0
|
# Imports for pandas, and numpy
import numpy as np
import pandas as pd
# imports for seaborn to and matplotlib to allow graphing
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid")
%matplotlib inline
# import Titanic CSV - NOTE: adjust file path as neccessary
dTitTrain_DF = pd.read_csv('train.csv')
# Clearing of Columns not neccesary for statistical analysis
dTitTrain_DF = dTitTrain_DF.drop(["Name", "Ticket"], axis=1)
dTitTrain_DF.info()
dTitTrain_DF.describe()
titAge = dTitTrain_DF.dropna(subset=['Age'])
# Distribution gender (adult and male)
ACmenData = dTitTrain_DF[dTitTrain_DF.Sex == 'male']
ACwomenData = dTitTrain_DF[dTitTrain_DF.Sex == 'female']
ACmenDataCount = float(ACmenData['Sex'].count())
ACwomenDataCount = float(ACwomenData['Sex'].count())
# Gender Specific DFs
AmenData = dTitTrain_DF[(dTitTrain_DF.Sex == 'male') & (dTitTrain_DF.Age >= 21)]
AwomenData = dTitTrain_DF[(dTitTrain_DF.Sex == 'female') & (dTitTrain_DF.Age >= 21)]
AmenDataCount = float(AmenData['Sex'].count())
AwomenDataCount = float(AwomenData['Sex'].count())
# print(menDataCount)
# print(womenDataCount)
# Age Specific Groups
adultData = titAge[titAge.Age >= 21]
childData = titAge[titAge.Age < 21]
adultDataCount = float(adultData['Age'].count())
childDataCount = float(childData['Age'].count())
#print(childDataCount)
#print(adultDataCount)
# Pclass
titClass1 = dTitTrain_DF[dTitTrain_DF.Pclass == 1]
titClass2 = dTitTrain_DF[dTitTrain_DF.Pclass == 2]
titClass3 = dTitTrain_DF[dTitTrain_DF.Pclass == 3]
# Alone or Family
dTitTrain_DF['SoloOrFamily'] = dTitTrain_DF.SibSp + dTitTrain_DF.Parch
dTitTrain_DF['SoloOrFamily'].loc[dTitTrain_DF['SoloOrFamily'] > 0] = 'Family'
dTitTrain_DF['SoloOrFamily'].loc[dTitTrain_DF['SoloOrFamily'] == 0] = 'Alone'
# Survivor Column (Yes or no)
dTitTrain_DF['Survivor']= dTitTrain_DF.Survived.map({0:'No', 1:'Yes'})
titCabin = dTitTrain_DF.dropna(subset=['Cabin'])
# Locational Data Groups
titDecks = titCabin['Cabin']
def deckGrab(tDK, cabLetter):
deckLevels = []
for level in tDK:
deckLevels.append(level[0])
TDF = pd.DataFrame(deckLevels)
TDF.columns = ['Cabin']
TDF = TDF[TDF.Cabin == cabLetter]
return TDF
def deckCount(tDK, cabLetter):
TDF = deckGrab(tDK, cabLetter)
return TDF[TDF.Cabin == cabLetter].count()['Cabin']
# print(deckCount(titDecks, "A"))
# print(deckCount(titDecks, "B"))
# print(deckCount(titDecks, "C"))
# print(deckCount(titDecks, "D"))
# print(deckCount(titDecks, "E"))
# print(deckCount(titDecks, "F"))
# print(deckCount(titDecks, "G"))
# embarked
titCherbourg = dTitTrain_DF[dTitTrain_DF.Embarked == 'C']
titQueenstown = dTitTrain_DF[dTitTrain_DF.Embarked == 'Q']
titSouthampton = dTitTrain_DF[dTitTrain_DF.Embarked == 'S']
"""
Explanation: Table of Contents:
Distribution of Passengers:
<a href="#Data Organization (Data Wrangling)">Data Organization (Data Wrangling)</a>
<a href="#Gender - Analysis | Graph">Gender</a>
<a href="#Age - Analysis | Graph">Age</a>
<a href="#Alone or Family">Alone or Family</a>
<a href="#Locational: Cabin Analysis | Graph">Locational (Cabin)</a>
<a href="#Locational: Disembark Analysis | Graph">Locational (Disembark)</a>
Surivival Graph Comparison:
<a href="#Survival Count (Overall)">Survival Count (Overall)</a>
<a href="#Survival by Gender">Survival By Gender</a>
<a href="#Survival by Pclass">Survival By Pclass</a>
<a href="#Survival Pclass and Gender">Survival By Pclass and Gender</a>
<a href="#Survival By Pclass and Age Group (Adult (Male / Female) / Child)">Survival By Pclass and Age Group (Adult (Male / Female) / Child)</a>
<a href="#Survival by Age Distribution">Survival by Age Distribution</a>
<a href="#Survival by Alone or with Family">Survival by Alone or with Family</a>
<a href="#Survival pClass by Age Distribution">Survival pClass by Age Distribution</a>
<a href="#Survival Gender by Age Distribution">Survival Gender by Age Distribution</a>
Process CSV - Generation of Estimation Survival Table
In Process
<a id="Data Organization (Data Wrangling)"></a>
Data Organization (Data Wrangling)
End of explanation
"""
printG = "Men account for " + str(ACmenDataCount) + " and " + "Women account for " + str(ACwomenDataCount) + " (Total Passengers: " + str(dTitTrain_DF.count()['Age']) + ")"
print(printG)
gSSC = sns.factorplot('Sex', data=dTitTrain_DF, kind='count')
gSSC.despine(left=True)
gSSC.set_ylabels("count of passengers")
"""
Explanation: Distribution of Passengers
Gender - Analysis | Graph
<a id="Gender - Analysis | Graph"></a>
Distribution of Genders in Passenger Population
End of explanation
"""
gGCSC= sns.factorplot('Pclass',order=[1,2,3], data=dTitTrain_DF, hue='Sex', kind='count')
gGCSC.despine(left=True)
gGCSC.set_ylabels("count of passengers")
"""
Explanation: Distribution of Genders in pClass populations
End of explanation
"""
printA = "Youngest Passenger in the passenger list was " + str(titAge['Age'].min()) + " years of age." \
+ "\n" + "Oldest Passenger in the passenger list was " + str(titAge['Age'].max()) + " years of age." \
+ "\n" + "Mean of Passengers ages in the passenger list is " + str(titAge['Age'].mean()) + " years of age."
print(printA)
"""
Explanation: Age - Analysis | Graph
<a id="Age - Analysis | Graph"></a>
End of explanation
"""
titAge['Age'].hist(bins=80)
"""
Explanation: Distribution of Age in passenger population
End of explanation
"""
gCPS = sns.FacetGrid(titAge,hue='Pclass', aspect=4, hue_order=[1,2,3])
gCPS.map(sns.kdeplot,'Age', shade=True)
gCPS.set(xlim=(0,titAge['Age'].max()))
gCPS.add_legend()
"""
Explanation: Distribution of Age in pClass population
End of explanation
"""
# splits passengers into 3 categories (male of female if considered adult, and child if below 21 of age)
def minorOrAdult(passenger):
age, sex = passenger
if age < 21:
return 'child'
else:
return sex
# adds new column to dataframe that distinguishes a passenger as a child or an adult
dTitTrain_DF['PersonStatus'] = dTitTrain_DF[['Age', 'Sex']].apply(minorOrAdult, axis=1)
dTitTrain_DF['PersonStatus'].value_counts()
"""
Explanation: Distribution of passengers into adult and children age groups (Child = less than 21 years of age)
Reference:
Source: http://history.stackexchange.com/questions/17481/what-was-the-age-of-majority-in-1900-united-states
By the common law the age of majority is fixed at twenty-one years for both sexes, and, in the absence of any statute to the contrary, every person under that age, whether male or female, is an infant. (21)
-- The American and English Encyclopedia of Law, Garland and McGeehee, 1900
By the common law, every person is, technically, an infant, until he is twenty-one years old; and, in legal presumption, is not of sufficient discretion to contract an obligation at an earlier age.
-- Institutes of the Lawes of England by Coke (1628-1644). The laws on infants are at 171b.
End of explanation
"""
gACPS = sns.FacetGrid(dTitTrain_DF, hue='PersonStatus', aspect=4, hue_order=['child', 'male', 'female'])
gACPS.map(sns.kdeplot,'Age', shade=True)
gACPS.set(xlim=(0,titAge['Age'].max()))
gACPS.add_legend()
"""
Explanation: Distribution of child and adult (male and female) age groups by age
End of explanation
"""
gGAC= sns.factorplot('Pclass', order=[1,2,3], data=dTitTrain_DF, hue='PersonStatus', kind='count',hue_order=['child','male','female'])
gGAC.despine(left=True)
gGAC.set_ylabels("count of passengers")
"""
Explanation: Distribution of child and adult (male and female) by pClass
End of explanation
"""
sns.factorplot('SoloOrFamily', data=dTitTrain_DF, kind='count')
print("Alone: " + str(dTitTrain_DF[dTitTrain_DF.SoloOrFamily == "Alone"].count()['SoloOrFamily']))
print("Family: " + str(dTitTrain_DF[dTitTrain_DF.SoloOrFamily == "Family"].count()['SoloOrFamily']))
"""
Explanation: Alone or Family
<a id="Alone or Family"></a>
End of explanation
"""
def prepareDeckGraph(titDecksDF):
deckLevels = []
for level in titDecksDF:
deckLevels.append(level[0])
T_DF = pd.DataFrame(deckLevels)
T_DF.columns = ['Cabin']
T_DF = T_DF[T_DF.Cabin != 'T']
return T_DF
gTD_DF = prepareDeckGraph(titDecks)
sns.factorplot('Cabin', order=['A','B','C','D','E','F','G'], data=gTD_DF, kind='count')
print("A: " + str(deckCount(titDecks, "A")))
print("B: " + str(deckCount(titDecks, "B")))
print("C: " + str(deckCount(titDecks, "C")))
print("D: " + str(deckCount(titDecks, "D")))
print("E: " + str(deckCount(titDecks, "E")))
print("F: " + str(deckCount(titDecks, "F")))
print("G: " + str(deckCount(titDecks, "G")))
"""
Explanation: Locational: Cabin Analysis | Graph
<a id="Locational: Cabin Analysis | Graph"></a>
End of explanation
"""
sns.factorplot('Embarked', order=['C','Q','S'], data=dTitTrain_DF, hue='Pclass', kind='count', hue_order=[1,2,3])
# titCherbourg
# titQueenstown
# titSouthampton
print("Total:")
print("Cherbourg: " + str(titCherbourg.count()['Embarked']))
print("Queenstown: " + str(titQueenstown.count()['Embarked']))
print("Southampton: " + str(titSouthampton.count()['Embarked']))
print("")
print("Cherbourg: ")
print("Pclass 1 - " + str(titCherbourg[titCherbourg.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titCherbourg[titCherbourg.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titCherbourg[titCherbourg.Pclass == 3].count()['Embarked']))
print("")
print("Queenstown: ")
print("Pclass 1 - " + str(titQueenstown[titQueenstown.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titQueenstown[titQueenstown.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titQueenstown[titQueenstown.Pclass == 3].count()['Embarked']))
print("")
print("Southampton: ")
print("Pclass 1 - " + str(titSouthampton[titSouthampton.Pclass == 1].count()['Embarked']))
print("Pclass 2 - " + str(titSouthampton[titSouthampton.Pclass == 2].count()['Embarked']))
print("Pclass 3 - " + str(titSouthampton[titSouthampton.Pclass == 3].count()['Embarked']))
"""
Explanation: Locational: Disembark Analysis | Graph
<a id="Locational: Disembark Analysis | Graph"></a>
End of explanation
"""
# Survivors Overall
gSOA = sns.factorplot('Survivor', data=dTitTrain_DF, kind='count')
gSOA.despine(left=True)
gSOA.set_ylabels("count of passengers")
print("Survivor: " + str(dTitTrain_DF[dTitTrain_DF.Survivor == "Yes"].count()['Survivor']))
print("Non-Survivor: " + str(dTitTrain_DF[dTitTrain_DF.Survivor == "No"].count()['Survivor']))
"""
Explanation: Survival Graph Comparison
Survival Count (Overall)
<a id="Survival Count (Overall)"></a>
End of explanation
"""
# Series probability - access probability of survived in men and women
menProb = ACmenData.groupby('Sex').Survived.mean()
womenProb = ACwomenData.groupby('Sex').Survived.mean()
menPercent = menProb[0]*100
womenPercent = womenProb[0]*100
print("Men Survivalbility: ")
print(menProb[0])
print("Women Survivalbility: ")
print(womenProb[0])
gSSP = sns.factorplot("Sex", "Survived", data=dTitTrain_DF, kind="bar", size=5)
gSSP.despine(left=True)
gSSP.set_ylabels("survival probability")
"""
Explanation: Survival by Gender
<a id="Survival by Gender"></a>
End of explanation
"""
# Determines the probability of survival for a given Pclass
def define_pClassProb(dataFrameIN, numClass):
classEntries = dataFrameIN[dataFrameIN.Pclass == numClass]
sClassEntries = classEntries[classEntries.Survived == 1]
cClassEntries = (classEntries.count(numeric_only=True)['Pclass']).astype(float)
cSClassEntries = (sClassEntries.count(numeric_only=True)['Pclass']).astype(float)
return (cSClassEntries/cClassEntries)
print("Class 1 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 1))
print("Class 2 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 2))
print("Class 3 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 3))
gCS = sns.factorplot("Pclass", "Survived",order=[1,2,3],data=dTitTrain_DF, kind="bar", size=5)
gCS.despine(left=True)
gCS.set_ylabels("survival probability")
print("Class 1 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 1))
print("Class 2 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 2))
print("Class 3 Survivality: ")
print(define_pClassProb(dTitTrain_DF, 3))
sns.factorplot("Pclass", "Survived",order=[1,2,3], data=dTitTrain_DF, kind='point')
"""
Explanation: Survival by Pclass
<a id="Survival by Pclass"></a>
End of explanation
"""
# determines the probability of survival for genders in a given Pclass
def define_pClassProbSex(dataFrameIN, numClass, sex):
classEntries = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.Sex == sex]
sClassEntries = classEntries[classEntries.Survived == 1]
cClassEntries = (classEntries.count(numeric_only=True)['Pclass']).astype(float)
cSClassEntries = (sClassEntries.count(numeric_only=True)['Pclass']).astype(float)
return (cSClassEntries/cClassEntries)
print("Class 1 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 1, 'male'))
print("Class 1 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 1, 'female'))
print("Class 2 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 2, 'male'))
print("Class 2 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 2, 'female'))
print("Class 3 Survivality(MALE): ")
print(define_pClassProbSex(dTitTrain_DF, 3, 'male'))
print("Class 3 Survivality(FEMALE): ")
print(define_pClassProbSex(dTitTrain_DF, 3, 'female'))
gGCSP = sns.factorplot("Pclass", "Survived",order=[1,2,3],data=dTitTrain_DF,hue='Sex', kind='bar')
gGCSP.despine(left=True)
gGCSP.set_ylabels("survival probability")
sns.factorplot("Pclass", "Survived", hue='Sex',order=[1,2,3], data=dTitTrain_DF, kind='point')
"""
Explanation: Survival Pclass and Gender
<a id="Survival Pclass and Gender"></a>
End of explanation
"""
#Determine probability of survival of children in a given Pclass
def define_pClassChildProb(dataFrameIN, numClass):
ChildDF = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == 'child']
ChildSurvived = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == 'child'][dataFrameIN.Survivor == 'Yes']
totalCChild = ChildDF.count()['PassengerId'].astype(float)
CChildSurvived = ChildSurvived.count()['PassengerId'].astype(float)
return CChildSurvived/totalCChild
def define_pClassAdultProb(dataFrameIN, numClass, sex):
AdultDF = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == sex]
AdultSurvived = dataFrameIN[dataFrameIN.Pclass == numClass][dataFrameIN.PersonStatus == sex][dataFrameIN.Survivor == 'Yes']
totalCAdult = AdultDF.count()['PassengerId'].astype(float)
CAdultSurvived = AdultSurvived.count()['PassengerId'].astype(float)
return CAdultSurvived/totalCAdult
print("PClass 1 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 1))
print("PClass 1 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 1, 'female'))
print("PClass 1 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 1, 'male'))
print("-----------")
print("PClass 2 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 2))
print("PClass 2 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 2, 'female'))
print("PClass 2 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 2, 'male'))
print("-----------")
print("PClass 3 Survival Child: ")
print(define_pClassChildProb(dTitTrain_DF, 3))
print("PClass 3 Survival Female: ")
print(define_pClassAdultProb(dTitTrain_DF, 3, 'female'))
print("PClass 3 Survival Male: ")
print(define_pClassAdultProb(dTitTrain_DF, 3, 'male'))
sns.factorplot("Pclass", "Survived", hue='PersonStatus',order=[1,2,3], data=dTitTrain_DF, kind='point')
"""
Explanation: Survival By Pclass and Age Group (Adult (Male / Female) / Child)
<a id="Survival By Pclass and Age Group (Adult (Male / Female) / Child)"></a>
End of explanation
"""
#sns.lmplot('Age', 'Survived', data=dTitTrain_DF)
pSBA = sns.boxplot(data=dTitTrain_DF, x='Survived', y='Age')
pSBA.set(title='Age Distribution by Survival',
xlabel = 'Survival',
        ylabel = 'Age Distribution',
xticklabels = ['Died', 'Survived'])
"""
Explanation: Survival by Age Distribution
<a id="Survival by Age Distribution"></a>
End of explanation
"""
# Using Solo or family column created earlier in passenger distributions section created a separate dataframes for traveling
#alone and with family passengers
familyPass = dTitTrain_DF[dTitTrain_DF['SoloOrFamily'] == "Family"]
alonePass = dTitTrain_DF[dTitTrain_DF['SoloOrFamily'] == "Alone"]
# Creates a list of surviving family and alone passengers
AFamilyPass = familyPass[familyPass.Survivor == "Yes"]
AAlonePass = alonePass[alonePass.Survivor == "Yes"]
# Determines the probability of survival for passengers that traveled alone and with family
pAF = float(AFamilyPass['SoloOrFamily'].count()) / float(familyPass['SoloOrFamily'].count())
pAA = float(AAlonePass['SoloOrFamily'].count()) / float(alonePass['SoloOrFamily'].count())
print("Probability of Survival being with Family: ")
print(pAF)
print("")
print("Probability of Survival being alone: ")
print(pAA)
gSSP = sns.factorplot("SoloOrFamily", "Survived", data=dTitTrain_DF, kind="bar", size=5)
gSSP.despine(left=True)
gSSP.set_ylabels("survival probability")
"""
Explanation: Survival by Alone or with Family
<a id="Survival by Alone or with Family"></a>
End of explanation
"""
#sns.lmplot('Age', 'Survived',hue='Pclass', data=dTitanic_DF, hue_order=[1,2,3])
pACSB = sns.boxplot(data = dTitTrain_DF.dropna(subset = ['Age']).sort_values('Pclass'), x='Pclass', y='Age', hue='Survivor')
pACSB.set(title='Age by Class and Survival - Box Plot', xlabel='Pclass')
pACSB.legend(bbox_to_anchor=(1.05, .7), loc=2, title = 'Survived',borderaxespad=0.)
"""
Explanation: Survival pClass by Age Distribution
<a id="Survival pClass by Age Distribution"></a>
End of explanation
"""
#sns.lmplot('Age', 'Survived', hue='Sex' ,data=dTitanic_DF)
pAGSB = sns.boxplot(data=dTitTrain_DF.dropna(subset = ['Age']), x= 'Sex', y= 'Age', hue='Survivor')
pAGSB.set(title='Age by Gender and Survival - Box Plot')
pAGSB.legend(bbox_to_anchor=(1.05, .7), loc=2, title = 'Survived',borderaxespad=0.)
"""
Explanation: Survival Gender by Age Distribution
<a id="Survival Gender by Age Distribution"></a>
End of explanation
"""
# Determining better odds which will be compared to test group (First comparison - Pclass and age group)
import csv
# # Manual - Age Group and gender adult with highest above 49%
# print(define_pClassChildProb(dTitTrain_DF, 1))
# print(define_pClassAdultProb(dTitTrain_DF, 1, 'female'))
# print(define_pClassChildProb(dTitTrain_DF, 2))
# print(define_pClassAdultProb(dTitTrain_DF, 2, 'female'))
# print(define_pClassAdultProb(dTitTrain_DF, 3, 'female'))
# #sibsp and parch
test_file = open('test.csv', 'rb')
test_file_object = csv.reader(test_file)
header = test_file_object.next()
prediction_file = open("genderPclassbasedmodel.csv", "wb")
prediction_file_object = csv.writer(prediction_file)
prediction_file_object.writerow(["PassengerId", "Survived"])
for row in test_file_object: # For each row in test.csv
    # csv.reader yields strings, so cast fields to numbers before comparing
    weight = 0.0
    if int(row[1]) == 1:
        weight = weight + 9
    elif int(row[1]) == 2:
        weight = weight + 8
    else:
        weight = weight + 5
    if row[3] == 'female':
        weight = weight + 8
    else:
        weight = weight + 2
    if row[4] != '' and float(row[4]) < 21:
        # child
        weight = weight + 6
    else:
        # adult (or unknown age)
        weight = weight + 5
    aFam = int(row[5]) + int(row[6])
    if aFam > 0:
        weight = weight + 5
    else:
        weight = weight + 3
    weightScore = weight/40.0
    print(str(weightScore))
    if(weightScore >= .5):
        prediction_file_object.writerow([row[0],'1'])
    else:
        prediction_file_object.writerow([row[0],'0'])
#prediction_file_object.writerow([row[0],'1'])
#prediction_file_object.writerow([row[0],'0'])
test_file.close()
prediction_file.close()
"""
Explanation: Process CSV - Generation of Estimation Survival Table
End of explanation
"""
|
intel-analytics/analytics-zoo
|
apps/variational-autoencoder/using_variational_autoencoder_and_deep_feature_loss_to_generate_faces.ipynb
|
apache-2.0
|
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.dataset import mnist
import datetime as dt
from glob import glob
import os
import numpy as np
from utils import *
import imageio
image_size = 148
Z_DIM = 100
ENCODER_FILTER_NUM = 32
# we use the vgg16 model, it should work on other popular CNN models
# You can download them here (https://github.com/intel-analytics/analytics-zoo/tree/master/models)
# download the CelebA data, and replace DATA_PATH with your own data path if needed
DATA_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational-autoencoder/img_align_celeba"
VGG_PATH = os.getenv("ANALYTICS_ZOO_HOME")+"/apps/variational-autoencoder/analytics-zoo_vgg-16_imagenet_0.1.0.model"
from zoo.common.nncontext import *
sc = init_nncontext("Variational Autoencoder Example")
sc.addFile(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/variational-autoencoder/utils.py")
"""
Explanation: Using Variational Autoencoder and Deep Feature Loss to Generate Faces
From the "Using Variational Autoencoder to Generate Faces" example, we see that using VAE, we can generate realistic human faces, but the generated image is a little blury. Though, you can continue to tuning the hyper paramters or using more data to get a better result, in this example, we adopted the approach in this paper. That is, instead of using pixel-by-pixel loss of between the original images and the generated images, we use the feature map generated by a pre-trained CNN network to define a feature perceptual loss. As you will see, the generated images will become more vivid.
End of explanation
"""
def conv_bn_lrelu(in_channels, out_channles, kw=4, kh=4, sw=2, sh=2, pw=-1, ph=-1):
model = Sequential()
model.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
model.add(SpatialBatchNormalization(out_channles))
model.add(LeakyReLU(0.2))
return model
def upsample_conv_bn_lrelu(in_channels, out_channles, out_width, out_height, kw=3, kh=3, sw=1, sh=1, pw=-1, ph=-1):
model = Sequential()
model.add(ResizeBilinear(out_width, out_height))
model.add(SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph))
model.add(SpatialBatchNormalization(out_channles))
model.add(LeakyReLU(0.2))
return model
def get_encoder_cnn():
input0 = Input()
#CONV
conv1 = conv_bn_lrelu(3, ENCODER_FILTER_NUM)(input0) # 32 * 32 * 32
conv2 = conv_bn_lrelu(ENCODER_FILTER_NUM, ENCODER_FILTER_NUM*2)(conv1) # 16 * 16 * 64
conv3 = conv_bn_lrelu(ENCODER_FILTER_NUM*2, ENCODER_FILTER_NUM*4)(conv2) # 8 * 8 * 128
conv4 = conv_bn_lrelu(ENCODER_FILTER_NUM*4, ENCODER_FILTER_NUM*8)(conv3) # 4 * 4 * 256
view = View([4*4*ENCODER_FILTER_NUM*8])(conv4)
# fully connected to generate mean and log-variance
mean = Linear(4*4*ENCODER_FILTER_NUM*8, Z_DIM)(view)
log_variance = Linear(4*4*ENCODER_FILTER_NUM*8, Z_DIM)(view)
model = Model([input0], [mean, log_variance])
return model
def get_decoder_cnn():
input0 = Input()
linear = Linear(Z_DIM, 4*4*ENCODER_FILTER_NUM*8)(input0)
reshape = Reshape([ENCODER_FILTER_NUM*8, 4, 4])(linear)
bn = SpatialBatchNormalization(ENCODER_FILTER_NUM*8)(reshape)
# upsampling
up1 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*8, ENCODER_FILTER_NUM*4, 8, 8)(bn) # 8 * 8 * 128
up2 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*4, ENCODER_FILTER_NUM*2, 16, 16)(up1) # 16 * 16 * 64
up3 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*2, ENCODER_FILTER_NUM, 32, 32)(up2) # 32 * 32 * 32
up4 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM, 3, 64, 64)(up3) # 64 * 64 * 3
output = Tanh()(up4)
model = Model([input0], [output])
return model
def get_autoencoder_cnn():
input0 = Input()
encoder = get_encoder_cnn()(input0)
sampler = GaussianSampler()(encoder)
decoder_model = get_decoder_cnn()
decoder = decoder_model(sampler)
model = Model([input0], [encoder, decoder])
return model, decoder_model
"""
Explanation: Define the Model
We are using the same model as in the "Using Variational Autoencoder to Generate Faces" example.
End of explanation
"""
def get_vgg():
# we use the vgg16 model, it should work on other popular CNN models
# You can download them here (https://github.com/intel-analytics/analytics-zoo/tree/master/models)
vgg_whole = Model.from_jvalue(Model.loadModel(VGG_PATH).value)
    # we only use one feature map here for the sake of simplicity and efficiency
    # You can add other feature maps to the outputs to mix high-level and low-level
    # features to get higher quality images
outputs = [vgg_whole.node(name) for name in ["relu1_2"]]
inputs = [vgg_whole.node(name) for name in ["data"]]
outputs[0].remove_next_edges()
vgg_light = Model(inputs, outputs).freeze()
return vgg_light
vgg = get_vgg()
model, decoder = get_autoencoder_cnn()
"""
Explanation: Load the pre-trained CNN model
End of explanation
"""
def get_data():
data_files = glob(os.path.join(DATA_PATH, "*.jpg"))
rdd_train_images = sc.parallelize(data_files[:100000]) \
.map(lambda path: get_image(path, image_size).transpose(2, 0, 1))
rdd_train_sample = rdd_train_images.map(lambda img: Sample.from_ndarray(img, [np.array(0.0), img]))
return rdd_train_sample
train_data = get_data()
"""
Explanation: Load the Datasets
End of explanation
"""
criterion = ParallelCriterion()
criterion.add(KLDCriterion(), 0.005) # You may want to tweak this parameter
criterion.add(TransformerCriterion(MSECriterion(), vgg, vgg), 1.0)
"""
Explanation: Define the Training Objective
End of explanation
"""
batch_size = 64
# Create an Optimizer
optimizer = Optimizer(
model=model,
training_rdd=train_data,
criterion=criterion,
optim_method=Adam(0.0005),
end_trigger=MaxEpoch(1),
batch_size=batch_size)
app_name='vae-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
train_summary = TrainSummary(log_dir='/tmp/vae',
app_name=app_name)
optimizer.set_train_summary(train_summary)
print ("saving logs to ",app_name)
"""
Explanation: Define the Optimizer
End of explanation
"""
redire_spark_logs()
show_bigdl_info_logs()
def gen_image_row():
decoder.evaluate()
return np.column_stack([decoder.forward(np.random.randn(1, Z_DIM)).reshape(3, 64,64).transpose(1, 2, 0) for s in range(8)])
def gen_image():
return inverse_transform(np.row_stack([gen_image_row() for i in range(8)]))
for i in range(1, 6):
optimizer.set_end_when(MaxEpoch(i))
trained_model = optimizer.optimize()
image = gen_image()
if not os.path.exists("./images"):
os.makedirs("./images")
if not os.path.exists("./models"):
os.makedirs("./models")
# you may change the following directory accordingly and make sure the directory
# you are writing to exists
imageio.imwrite("./images/image_vgg_%s.png" % i, image)
decoder.saveModel("./models/decoder_vgg_%s.model" % i, over_write = True)
import matplotlib
matplotlib.use('Agg')
%pylab inline
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
loss = np.array(train_summary.read_scalar("Loss"))
plt.figure(figsize = (12,12))
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
"""
Explanation: Spin Up the Training
This could take a while. It took about 6 hours on a desktop with an Intel i7-6700 CPU and 40 GB of Java heap memory. You can reduce the training time by using less data (some changes in the "Load the Datasets" section), but the performance may not be as good.
End of explanation
"""
from matplotlib.pyplot import imshow
img = gen_image()
imshow(img)
"""
Explanation: Random Sample Some Images
End of explanation
"""
|
spacedrabbit/PythonBootcamp
|
Iterators and Generators Homework.ipynb
|
mit
|
def gensquares(N):
for i in range(N):
yield i**2
for x in gensquares(10):
print x
"""
Explanation: Iterators and Generators Homework
Problem 1
Create a generator that generates the squares of numbers up to some number N.
End of explanation
"""
import random
random.randint(1,10)
def rand_num(low,high,n):
    for i in range(n):
yield random.randint(low, high)
for num in rand_num(1,10,12):
print num
"""
Explanation: Problem 2
Create a generator that yields "n" random numbers between a low and high number (that are inputs). Note: Use the random library. For example:
End of explanation
"""
s = 'hello'
#code here
for letter in iter(s):
print letter
"""
Explanation: Problem 3
Use the iter() function to convert the string below
End of explanation
"""
my_list = [1,2,3,4,5]
gencomp = (item for item in my_list if item > 3)
for item in gencomp:
print item
"""
Explanation: Problem 4
Explain a use case for a generator using a yield statement where you would not want to use a normal function with a return statement.
A generator, using a yield statement, returns an iterator object. The iterator yields one value each time it is asked for the next item. So in cases where a return statement would hand back the entirety of a list at once, a generator returns only the current item, remembering its state from where it last yielded. This is useful when the full sequence would be expensive (or impossible) to build and hold in memory all at once.
Extra Credit!
Can you explain what gencomp is in the code below? (Note: We never covered this in lecture! You will have to do some googling/Stack Overflowing!)
End of explanation
"""
|
fabge/fabge.github.io
|
_notebooks/test.ipynb
|
apache-2.0
|
#hide
import pandas as pd
import altair as alt
"""
Explanation: Example Fastpages Notebook
An example fastpages notebook
toc: True
See fastpages/_notebooks/README.md for a detailed explanation on how to use notebooks with fastpages. This notebook is a demonstration of some of fastpages's capabilities with notebooks
With fastpages you can save your jupyter notebooks into the _notebooks folder at the root of your repository, and they will automatically be converted to Jekyll compliant blog posts!
put a #hide flag at the top of any cell you want to completely hide in the docs
End of explanation
"""
# hide
cars = 'https://vega.github.io/vega-datasets/data/cars.json'
movies = 'https://vega.github.io/vega-datasets/data/movies.json'
sp500 = 'https://vega.github.io/vega-datasets/data/sp500.csv'
stocks = 'https://vega.github.io/vega-datasets/data/stocks.csv'
flights = 'https://vega.github.io/vega-datasets/data/flights-5k.json'
# hide
df = pd.read_json(movies) # load movies data
genres = df['Major_Genre'].unique() # get unique field values
genres = list(filter(lambda d: d is not None, genres)) # filter out None values
genres.sort() # sort alphabetically
#hide
mpaa = ['G', 'PG', 'PG-13', 'R', 'NC-17', 'Not Rated']
"""
Explanation: Interactive Charts With Altair
Charts made with Altair remain interactive. Example charts taken from this repo, specifically this notebook.
End of explanation
"""
# single-value selection over [Major_Genre, MPAA_Rating] pairs
# use specific hard-wired values as the initial selected values
selection = alt.selection_single(
name='Select',
fields=['Major_Genre', 'MPAA_Rating'],
init={'Major_Genre': 'Drama', 'MPAA_Rating': 'R'},
bind={'Major_Genre': alt.binding_select(options=genres), 'MPAA_Rating': alt.binding_radio(options=mpaa)}
)
# scatter plot, modify opacity based on selection
alt.Chart(movies).mark_circle().add_selection(
selection
).encode(
x='Rotten_Tomatoes_Rating:Q',
y='IMDB_Rating:Q',
tooltip='Title:N',
opacity=alt.condition(selection, alt.value(0.75), alt.value(0.05))
)
"""
Explanation: Example 1: DropDown
End of explanation
"""
brush = alt.selection_interval(
encodings=['x'] # limit selection to x-axis (year) values
)
# dynamic query histogram
years = alt.Chart(movies).mark_bar().add_selection(
brush
).encode(
alt.X('year(Release_Date):T', title='Films by Release Year'),
alt.Y('count():Q', title=None)
).properties(
width=650,
height=50
)
# scatter plot, modify opacity based on selection
ratings = alt.Chart(movies).mark_circle().encode(
x='Rotten_Tomatoes_Rating:Q',
y='IMDB_Rating:Q',
tooltip='Title:N',
opacity=alt.condition(brush, alt.value(0.75), alt.value(0.05))
).properties(
width=650,
height=400
)
alt.vconcat(years, ratings).properties(spacing=5)
"""
Explanation: Example 2: Use One Visualization To Filter Another
End of explanation
"""
alt.Chart(movies).mark_circle().add_selection(
alt.selection_interval(bind='scales', encodings=['x'])
).encode(
x='Rotten_Tomatoes_Rating:Q',
y=alt.Y('IMDB_Rating:Q', axis=alt.Axis(minExtent=30)), # use min extent to stabilize axis title placement
tooltip=['Title:N', 'Release_Date:N', 'IMDB_Rating:Q', 'Rotten_Tomatoes_Rating:Q']
).properties(
width=600,
height=400
)
"""
Explanation: Example 3: Tooltips
End of explanation
"""
# select a point for which to provide details-on-demand
label = alt.selection_single(
encodings=['x'], # limit selection to x-axis value
on='mouseover', # select on mouseover events
nearest=True, # select data point nearest the cursor
empty='none' # empty selection includes no data points
)
# define our base line chart of stock prices
base = alt.Chart().mark_line().encode(
alt.X('date:T'),
alt.Y('price:Q', scale=alt.Scale(type='log')),
alt.Color('symbol:N')
)
alt.layer(
base, # base line chart
# add a rule mark to serve as a guide line
alt.Chart().mark_rule(color='#aaa').encode(
x='date:T'
).transform_filter(label),
# add circle marks for selected time points, hide unselected points
base.mark_circle().encode(
opacity=alt.condition(label, alt.value(1), alt.value(0))
).add_selection(label),
# add white stroked text to provide a legible background for labels
base.mark_text(align='left', dx=5, dy=-5, stroke='white', strokeWidth=2).encode(
text='price:Q'
).transform_filter(label),
# add text labels for stock prices
base.mark_text(align='left', dx=5, dy=-5).encode(
text='price:Q'
).transform_filter(label),
data=stocks
).properties(
width=700,
height=400
)
"""
Explanation: Example 4: More Tooltips
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.3/tutorials/optimizing.ipynb
|
gpl-3.0
|
#!pip install -I "phoebe>=2.3,<2.4"
import phoebe
b = phoebe.default_binary()
"""
Explanation: Advanced: Optimizing Performance with PHOEBE
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
print(phoebe.conf.interactive_checks)
phoebe.interactive_checks_off()
print(phoebe.conf.interactive_checks)
"""
Explanation: Interactivity Options
When running in an interactive Python session, PHOEBE updates all constraints and runs various checks after each command. Although this is convenient, it does take some time, and it can sometimes be advantageous to disable this to save computation time.
Interactive Checks
By default, interactive checks are enabled when PHOEBE is being run in an interactive session (either an interactive python, IPython, or Jupyter notebook session), but disabled when PHOEBE is run as a script directly from the console. When enabled, PHOEBE will re-run the system checks after every single change to the bundle, raising warnings via the logger as soon as they occur.
This default behavior can be changed via phoebe.interactive_checks_on() or phoebe.interactive_checks_off(). The current value can be accessed via phoebe.conf.interactive_checks.
End of explanation
"""
print(b.run_checks())
b.set_value('requiv', component='primary', value=50)
print(b.run_checks())
"""
Explanation: If disabled, you can always manually run the checks via b.run_checks().
End of explanation
"""
print(phoebe.conf.interactive_constraints)
print(b.filter('mass', component='primary'))
b.set_value('sma@binary', 10)
print(b.filter('mass', component='primary'))
"""
Explanation: Interactive Constraints
By default, interactive constraints are always enabled in PHOEBE, unless explicitly disabled. Whenever a value is changed in the bundle that affects the value of a constrained value, that constraint is immediately executed and all applicable values updated. The ensures that all constrained values are "up-to-date".
If disabled, constraints are delayed and only executed when needed by PHOEBE (when calling run_compute, for example). This can save significant time, as each value that needs updating only needs to have its constraint executed once, instead of multiple times.
This default behavior can be changed via phoebe.interactive_constraints_on() or phoebe.interactive_constraints_off(). The current value can be accessed via phoebe.conf.interactive_constraints.
Let's first look at the default behavior with interactive constraints on.
End of explanation
"""
phoebe.interactive_constraints_off()
print(phoebe.conf.interactive_constraints)
print(b.filter('mass', component='primary'))
b.set_value('sma@binary', 15)
print(b.filter('mass', component='primary'))
"""
Explanation: Note that the mass has already updated, according to the constraint, when the value of the semi-major axes was changed. If we disable interactive constraints this will not be the case.
End of explanation
"""
b.run_delayed_constraints()
print(b.filter('mass', component='primary'))
phoebe.reset_settings()
"""
Explanation: No need to worry though - all constraints will be run automatically before passing to the backend. If you need to access the value of a constrained parameter, you can explicitly ask for all delayed constraints to be executed via b.run_delayed_constraints().
End of explanation
"""
b.add_dataset('lc')
print(b.get_dataset())
"""
Explanation: Filtering Options
check_visible
By default, every time you call filter or set_value, PHOEBE checks to see if the current value is visible (meaning it is relevant given the value of other parameters). Although not terribly expensive, these checks can add up... so disabling these checks can save time. Note that these are automatically temporarily disabled during run_compute. If disabling these checks, be aware that changing the value of some parameters may have no effect on the resulting computations. You can always manually check the visibility/relevance of a parameter by calling parameter.is_visible.
This default behavior can be changed via phoebe.check_visible_on() or phoebe.check_visible_off().
Let's first look at the default behavior with check_visible on.
End of explanation
"""
phoebe.check_visible_off()
print(b.get_dataset())
"""
Explanation: Now if we disable check_visible, we'll see the same thing as if we passed check_visible=False to any filter call.
End of explanation
"""
print(b.get_parameter(qualifier='ld_coeffs_source', component='primary').visible_if)
"""
Explanation: Now the same filter is returning additional parameters. For example, ld_coeffs_source parameters were initially hidden because ld_mode is set to 'interp'. We can see the rules that are being followed:
End of explanation
"""
print(b.get_parameter(qualifier='ld_coeffs_source', component='primary').is_visible)
phoebe.reset_settings()
"""
Explanation: and can still manually check to see that it shouldn't be visible (isn't currently relevant given the value of ld_func):
End of explanation
"""
print(b.get_dataset())
print(b.get_dataset(check_default=False))
phoebe.check_default_off()
print(b.get_dataset())
phoebe.reset_settings()
"""
Explanation: check_default
Similarly, PHOEBE automatically excludes any parameter which is tagged with a '_default' tag. These parameters exist to provide default values when a new component or dataset are added to the bundle, but can usually be ignored, and so are excluded from any filter calls. Although not at all expensive, this too can be disabled at the settings level or by passing check_default=False to any filter call.
This default behavior can be changed via phoebe.check_default_on() or phoebe.check_default_off().
End of explanation
"""
phoebe.get_download_passband_defaults()
"""
Explanation: Passband Options
PHOEBE automatically fetches necessary tables from tables.phoebe-project.org. By default, only the necessary tables for each passband are fetched (except when calling download_passband manually) and the fits files are fetched uncompressed.
For more details, see the API docs on download_passband and update_passband as well as the passband updating tutorial.
The default values mentioned in the API docs for content and gzipped can be exposed via phoebe.get_download_passband_defaults and changed via phoebe.set_download_passband_defaults. Note that setting gzipped to True will minimize file storage for the passband files and will result in faster download speeds, but take significantly longer to load by PHOEBE as they have to be uncompressed each time they are loaded. If you have a large number of installed passbands, this could significantly slow importing PHOEBE.
End of explanation
"""
|
lucasmaystre/choix
|
notebooks/choicerank-tutorial.ipynb
|
mit
|
import choix
import networkx as nx
import numpy as np
%matplotlib inline
"""
Explanation: Using ChoiceRank to understand network traffic
This notebook provides a quick example on how to use ChoiceRank to estimate transitions along the edges of a network based only on the marginal traffic at the nodes.
End of explanation
"""
n_items = 8
p_edge = 0.3
n_samples = 3000
# 1. Generate a network.
graph = nx.erdos_renyi_graph(n_items, p_edge, directed=True)
# 2. Generate a parameter for each node.
params = choix.generate_params(n_items, interval=2.0)
# 3. Generate samples of choices in the network.
transitions = np.zeros((n_items, n_items))
for _ in range(n_samples):
src = np.random.choice(n_items)
neighbors = list(graph.successors(src))
if len(neighbors) == 0:
continue
dst = choix.compare(neighbors, params)
transitions[src, dst] += 1
"""
Explanation: 1. Generating sample data
First, we will generate sample data.
This includes
generating a network,
generating a parameter for each node of the network,
generating samples of choices in the network.
End of explanation
"""
nx.draw(graph, with_labels=True)
"""
Explanation: The network looks as follows
End of explanation
"""
traffic_in = transitions.sum(axis=0)
traffic_out = transitions.sum(axis=1)
print("incoming traffic:", traffic_in)
print("outgoing traffic:", traffic_out)
"""
Explanation: Now we aggregate all the transitions into incoming and outgoing traffic.
End of explanation
"""
params = choix.choicerank(graph, traffic_in, traffic_out)
"""
Explanation: 2. Estimating transitions using ChoiceRank
ChoiceRank can be used to recover the transitions on the network based only on:
information about the structure of the network, and
the (marginal) incoming and outgoing traffic at each node.
ChoiceRank works under the assumption that each node has a latent "preference" score, and that transitions follow Luce's choice model.
End of explanation
"""
est = np.zeros((n_items, n_items))
for src in range(n_items):
neighbors = list(graph.successors(src))
if len(neighbors) == 0:
continue
probs = choix.probabilities(neighbors, params)
est[src,neighbors] = traffic_out[src] * probs
print("True transition matrix:")
print(transitions)
print("\nEstimated transition matrix:")
print(np.round_(est))
print("\nDifference:")
print(np.round_(transitions - est))
"""
Explanation: We can attempt to reconstruct the transition matrix using the marginal traffic data and the parameters.
End of explanation
"""
|
astroumd/GradMap
|
notebooks/Lectures2017/Lecture1/GradMap_L1.ipynb
|
gpl-3.0
|
## You can use Python as a calculator:
5*7 #This is a comment and does not affect your code.
#You can have as many as you want.
#No worries.
5+7
5-7
5/7
"""
Explanation: Introduction to "Doing Science" in Python for REAL Beginners
Python is one of many languages you can use for research and HW purposes. In the next few days, we will work through many of the tools, tips, and tricks that we as graduate students (and PhD researchers) use on a daily basis. We will NOT attempt to teach you all of Python--there isn't time. We will however build up a set of code(s) that will allow you to read and write data, make beautiful publish-worthy plots, fit a line (or any function) to data, and set up algorithms. You will also begin to learn the syntax of Python and can hopefully apply this knowledge to your current and future work.
Before we begin, a few words on navigating the iPython Notebook:
There are two main types of cells : Code and Text
In "code" cells "#" at the beginning of a line marks the line as comment
In "code" cells every non commented line is intepreted
In "code" cells, commands that are preceded by % are "magics" and are special commands in Ipython to add some functionality to the runtime interactive environment.
Shift+Return shortcut to execute a cell
Alt+Return shortcut to execute a cell and create another one below
Here you can find a complete documentation about the notebook.
http://ipython.org/ipython-doc/1/interactive/notebook.html
In particular have a look at the section about the keyboard shortcuts.
And remember that :
Indentation has a meaning ( we'll talk about this when we cover loops)
Indexes start from 0 ( similar to C )
We will discuss more about these concepts while doing things. Let's get started now!!!!
A. Numbers and Calculations
Note: To insert comments to yourself (this is always a great idea), use the # symbol.
End of explanation
"""
a = 5
b = 7
print(a)
print(b)
print(a*b , a+b, a/b)
a = 5.
b = 7
print(a*b, a+b, a/b)
"""
Explanation: These simple operations on numbers in Python 3 work exactly as you'd expect, but that's not true across all programming languages.
For example, in Python 2, an older version of Python that is still used often in scientific programming:
$5/7$ $\neq$ $5./7$
The two calculations below would be equal on most calculators, but they are not equal to Python 2 and many other languages. The first, $5/7$ is division between integers and the answer will be an integer. The second, $5./7$ is division between a float (a number with a decimal) and an integer.
In Python 2,
$5/7$ = $0$
Since division of integers must return an integer, in this case 0. This is not the same for 5./7, which is
$5./7$ = $0.7142857142857143$
This is something to keep in mind when working with programming languages, but Python 3 takes care of this for you.
However, for the sake of consistency, it is best to use float division rather than integer division.
Let's assign some variables and print() them to the screen.
End of explanation
"""
c = [0,1,2,3,4,5,6,7,8,9]
print(c)
"""
Explanation: Next, let's create a list of numbers and do math to that list.
End of explanation
"""
len(c)
"""
Explanation: How many elements or numbers does the list c contain? Yes, this is easy to count now, but you will eventually work with lists that contain MANY numbers. To get the length of a list (or array), use len().
End of explanation
"""
type(c) #pick a variable: a, b, or c and type it in the parentheses
"""
Explanation: What exactly is c? It looks like an array because of the square brackets, but it isn't. To see what any variable is, use type().
End of explanation
"""
d = c**2
"""
Explanation: Now, some math... Let's square each value in c and put those values in a new list called d. To square a variable (or number), you use **. So $3^{**}2=9$. The rest of the math operations (+ / - x) are 'normal.'
End of explanation
"""
import numpy
"""
Explanation: This should not have worked. Why? The short answer is that a list is very useful, but it is not an array. However, you can convert your lists to arrays (and back again if you feel you need to). In order to do this conversion (and just about anything else), we need something extra.
Python is a fantastic language because it is very powerful and flexible. Also, it is like modular furniture or modular building. You have the Python foundation and choose which modules you want/need and load them before you start working. One of the most loved here at UMD is the NumPy module (pronounced num-pie). This is the something extra (the module), that we need. For more information, see www.numpy.org/.
When we plot this data below, we will also need a module for plotting.
First, let us import NumPy.
End of explanation
"""
c = numpy.array(c)
d = c**2
print(d)
type(d)
"""
Explanation: To convert our list $c = [0,1,2,3,4,5,6,7,8,9]$ to an array we use numpy.array(),
End of explanation
"""
import numpy as np
"""
Explanation: Great! However, typing numpy over and over again can get tiresome, so we can import it and give it a shorter name. It is common to use the following:
End of explanation
"""
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
x = c
y = d
p = plt.plot(x,y)
p = plt.plot(x,y**2)
"""
Explanation: In this notation, converting a list to an array would be np.array(c).
B. Our first plot!
Now let's make a quick plot! We'll use the values from our list turned array, $c$, and plot $c^2$.
End of explanation
"""
np.arange(0,10,2) #here the step size is 2. So you'll get even numbers.
np.linspace(0,10,2) #here you're asking for 2 values. Guess what they'll be!
"""
Explanation: C. Arrays of numbers
You can also create arrays of numbers using NumPy. Two very useful ways to create arrays are by selecting the interval you care about (say, 0 to 10) and either specifying how far apart you want your values to be (numpy.arange), OR the total number of values you want (np.linspace). Let's look at some examples.
End of explanation
"""
import ___ as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
___ = np.linspace(____)
___ = np.arange(____)
# Clear the plotting field.
plt.clf() # No need to add anything inside these parentheses.
plt.plot(__,__,'ro') # The 'ro' says you want to use Red o plotting symbols.
"""
Explanation: Next make an array with endpoints 0 and 1 (include 0 and 1), that has 50 values in it. You can use either (both?) np.arange or np.linspace. Which is easier to you? How many numbers do you get? Are these numbers integers or floats (decimal place)?
Next make an array with endpoints 0 and 2.5 (include 0 and 2.5), that has values spaced out in increments of 0.05. For example: 0, 0.05, 0.1, 0.15... You can use either np.arange or np.linspace. Which is easier to you? How many numbers do you get? Are these numbers integers or floats (decimal place)?
Next, let's plot these two arrays. Call them $a$ and $b$, or $x$ and $y$ (whichever you prefer--this is your code!), for example: a = np.linspace(). Fill in the missing bits in the code below.
End of explanation
"""
import ___ as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
x = np.linspace(-1,1,100)
y = np.sqrt(______)
# Clear the plotting field.
plt.clf() # No need to add anything inside these parentheses.
plt.plot(x,y,'ro') # The 'ro' says you want to use Red o plotting symbols.
plt.xlim([-2,2])
plt.ylim([-2,2])
"""
Explanation: For all the possible plotting symbols, see: http://matplotlib.org/api/markers_api.html. Next, let's plot the positive half of a circle.
End of explanation
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
x = np.linspace(100,2000,10000)*1e-9 #wavelength, we want a range of 100 nm to 2000 nm, but in METERS
Blam = 2.0*6.626e-34*2.998e8**2/x**5/(np.exp(6.626e-34*2.998e8/(x*1.381e-23*5800.0))-1.0)
plt.clf()
p = plt.plot(x*1e9,Blam) #we multiply by 1e9 so that the x axis shows nm
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel('Spectral Radiance ()') #What are the units?
"""
Explanation: D. A more complicated function: The Planck Blackbody Curve
Any object at a temperature above absolute zero (at 0 Kelvin, atoms stop moving), emits light of all wavelengths with varying degrees of efficiency. Recall that light is electromagnetic radiation. We see this radiation with our eyes when the wavelength is 400-700 nm, and we feel it as heat when the wavelength is 700 nm - 1mm. A blackbody is an object that is a perfect emitter. This means that it perfectly absorbs any energy that hits it and re-emits this energy over ALL wavelengths. The spectrum of a blackbody looks like a rainbow to our eyes.
The expression for this "rainbow" over all wavelengths is given by,
$$
B(\lambda,T) = \dfrac{2hc^2}{\lambda^5} \dfrac{1}{e^{\frac{hc}{\lambda kT}}-1}
$$
where $\lambda$ is the wavelength, $T$ is the temperature of the object, $h$ is Planck's constant, $c$ is the speed of light, and $k$ is the Boltzmann constant.
https://en.wikipedia.org/wiki/Black-body_radiation
Plotting a blackbody curve:
First, let us look at code that might be confusing to read later on.
End of explanation
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 # J s
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's pick the sun. YOU will need to PICK the temperature and then a range of
# frequencies or wavelengths that "make sense" for that temperature.
# We know that the sun peaks in visible part of the spectrum. This wavelength
# is close to 500 nm. Let's have the domain (x values) go from 100 nm to 2000 nm.
# 1 nm = 10^-9 m = 10^-7 cm.
lam = np.linspace(100,2000,10000)*1e-9 #wavelength in nm
nu = c/lam
T = 5800.0
exp = np.exp(h*c/(lam*k*T))
num = 2.0 * h * c**2
denom = lam**5 * (exp - 1.0)
Blam = num/denom
plt.clf()
p = plt.plot(lam*1e9,Blam)
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel(r'Spectral Radiance (W m$^{-3}$)') #What are the units?
# Try a log-log plot.
#p = plt.loglog(wav,Bnu)
"""
Explanation: Would this code be easy to edit for other temperatures?
Now, let's look at code that does the same thing, but is more documented:
End of explanation
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 #
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's try to recreate the plot above.
# Pick temperatures: T1 = 7000 K , T2= 5800 K, and T3 = 4000 K.
# Let's have the domain (x values) go from 100 nm to 2000 nm.
# 1 nm = 10^-9 m.
wav = np.linspace(100,2000,10000)*1e-9 #in meters
T1 = 7000.
T2 = 5800.
T3 = 4000.
num = 2.0 * h * c**2
exp1 = np.exp(h*c/(wav*k*T1))
denom1 = wav**5 * (exp1 - 1.0)
exp2 = np.exp(h*c/(wav*k*T2))
denom2 = wav**5 * (exp2 - 1.0)
exp3 = np.exp(h*c/(wav*k*T3))
denom3 = wav**5 * (exp3 - 1.0)
Bnu1 = num/denom1
Bnu2 = num/denom2
Bnu3 = num/denom3
plt.clf()
p1 = plt.plot(wav*1e9,Bnu1,label='T =7000 K')
p2 = plt.plot(wav*1e9,Bnu2,label='T = 5800 K')
p3 = plt.plot(wav*1e9,Bnu3,label='T = 4000 K')
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel(r'Spectral Radiance (W m$^{-3}$)')
l = plt.legend()
"""
Explanation: Plotting Multiple Curves:
End of explanation
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 #
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's try to recreate the plot above.
# Pick three temperatures.
# Decide on a domain in Hertz (frequency) that makes sense.
# c = nu x lambda, nu = c/lambda
#### Put your code here ###
"""
Explanation: Next, let's have you try an example. We mentioned above that wavelength and frequency are related by the speed of light,
$$
c = \lambda \nu.
$$
We also described a blackbody in terms of the wavelength. However, we can also describe a blackbody in terms of frequency,
$$
B(\nu,T) = \dfrac{2h\nu^3}{c^2} \dfrac{1}{e^{\frac{h\nu}{kT}}-1}.
$$
We can do this because
$$
B_\nu d\nu = B_\lambda d\lambda
$$
where
$$
\nu = \frac{c}{\lambda} \quad \text{and} \quad \frac{d\nu}{d\lambda} = \left| - \frac{c}{\lambda^2}\right|.
$$
EXERCISE: Make the same plot above (for 3 separate temperatures) with the x axis showing frequency (in Hertz).
End of explanation
"""
|
SCPSscience/Notebooks
|
PropertiesofStars.ipynb
|
mit
|
# Import modules that contain functions we need
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Read in data that will be used for the calculations.
# Using pandas read_csv method, we can create a data frame
data = pd.read_csv("https://github.com/adamlamee/CODINGinK12-data/raw/master/stars.csv")
# We wish to look at the first 3 rows of our data set
data.head(3)
"""
Explanation: Properties of the Stars
Analyzing Appearance and Properties
Importing the functions and getting the data...
End of explanation
"""
fig = plt.figure(figsize=(15, 6))
plt.scatter(data.ra,data.dec, s=0.01)
plt.xlim(24, 0)
plt.title("All the Stars in the Catalogue")
plt.xlabel('Right Ascension (Hours)')
plt.ylabel('Declination (Degrees)')
from IPython.display import Image
from IPython.core.display import HTML
Image(url= 'http://www.hpcf.upr.edu/~abel/phl/nearby_stars_with_exoplanets.png')
"""
Explanation: PART 1: All the Stars in Our Catalogue
<b>Declination</b> is the distance a star is North or South of the Celestial Equator, similar to <u><i>latitude</u></i> on Earth. <b>Right</b> <b>Ascension</b> is how far east or west a star is, similar to <u><i>longitude</u></i> on Earth.
End of explanation
"""
# format the points on the graph
transparency = 1
size = 1
# draws a scatter plot
fig = plt.figure(figsize=(20, 4.5))
plt.scatter(data.temp, data.lum, s=size, edgecolors='none', alpha=transparency)
plt.xlim(2000,15000)
plt.ylim(0,1000)
plt.title("Does hotter mean brighter?")
plt.ylabel("Luminosity")
plt.xlabel("Temperature (K)")
Image(url= 'http://hmxearthscience.com/Galaxies%20and%20Stars/HR%20Lab%202.jpg')
"""
Explanation: PART 2: Relationships Between Two Properties
End of explanation
"""
# These are the abbreviations for all the constellations
data.sort_values('con').con.unique()
"""
Explanation: PART 3: Constellations and Star Properties
End of explanation
"""
# This shows just one constellation
data_con = data.query('con == "Vir"')
# This plots where the brightest 10 stars are in the sky
data_con = data_con.sort_values('mag').head(10)
plt.scatter(data_con.ra,data_con.dec)
plt.gca().invert_xaxis()
plt.title("A constellation in the sky")
plt.xlabel('Right Ascension (Hours)')
plt.ylabel('Declination (Degrees)')
"""
Explanation: Choose a Constellation from the list above and insert the 3 letter code below in the " ", for example "Vir".
End of explanation
"""
# format the points on the graph
transparency = 0.2
size = 1
# draws a scatter plot
fig = plt.figure(figsize=(6, 4.5))
plt.scatter(data.temp, data.absmag, s=size, edgecolors='none', alpha=transparency)
plt.scatter(data_con.temp, data_con.absmag, color='red', edgecolors='none')
plt.xlim(17000,2000)
plt.ylim(18,-18)
plt.title("Types of stars in a constellation")
plt.ylabel("Absolute Magnitude")
plt.xlabel("Temperature (K)")
"""
Explanation: Links for Question #9
Can we see your constellation now?
Which types of stars make up your constellation?
End of explanation
"""
|
kgourgou/stochastic-simulations-class
|
ipython_notebooks/BrownianMotion.ipynb
|
mit
|
# Setting up some parameters.
T = 1; # Final time
n = 500; # Number of points to use in discretization
Dt = float(T)/n;
print 'Stepsize =', Dt,'.'
def pathGenerate(npath,n, Dt=0.002):
# Function that generates discrete approximations to a brownian path.
Wiener = np.zeros([n,npath])
for j in xrange(npath):
for i in xrange(n-1):
Wiener[i+1,j] = Wiener[i,j]+np.sqrt(Dt)*randn()
return Wiener
t = np.linspace(0,T,n)
Wiener = pathGenerate(10, n)
WienerMean = np.mean(Wiener,axis=1)
WienerVar = np.var(Wiener,axis=1)
pl.errorbar(t, WienerMean,yerr=np.sqrt(WienerVar),
color=sns.xkcd_rgb['pale red'],ecolor=sns.xkcd_rgb['denim blue'],linewidth=5)
pl.legend(['Mean of paths', 'Uncertainty (standard deviation)'],loc=0)
"""
Explanation: Discrete approximation to Brownian motion
End of explanation
"""
Wiener = pathGenerate(100, n)
WienerMean = np.mean(Wiener,axis=1)
WienerVar = np.var(Wiener,axis=1)
pl.errorbar(t, WienerMean,yerr=np.sqrt(WienerVar),
color=sns.xkcd_rgb['pale red'],ecolor=sns.xkcd_rgb['denim blue'],linewidth=5)
pl.legend(['Mean of paths', 'Uncertainty (standard deviation)'],loc=0)
"""
Explanation: The first case uses ten paths. We can see that the mean path hasn't quite converged yet.
End of explanation
"""
Wiener = pathGenerate(1000, n)
WienerMean = np.mean(Wiener,axis=1)
WienerVar = np.var(Wiener,axis=1)
pl.errorbar(t, WienerMean,yerr=np.sqrt(WienerVar),
color=sns.xkcd_rgb['pale red'],ecolor=sns.xkcd_rgb['denim blue'],linewidth=5)
pl.legend(['Mean of paths', 'Uncertainty (standard deviation)'],loc=0)
pl.title('Using a thousand simulated paths.',fontsize=15)
"""
Explanation: The picture looks much better with a hundred paths.
End of explanation
"""
n=10
t = np.linspace(0,T,n)
Wiener = pathGenerate(10, n)
WienerMean = np.mean(Wiener,axis=1)
WienerVar = np.var(Wiener,axis=1)
pl.errorbar(range(0,n), WienerMean,yerr=np.sqrt(WienerVar),
color=sns.xkcd_rgb['pale red'],ecolor=sns.xkcd_rgb['denim blue'],linewidth=5)
pl.legend(['Mean of paths', 'Uncertainty (standard deviation)'],loc=0)
pl.title('For comparison, here is another plot with only 10 points', fontsize=15)
"""
Explanation: And with a thousand paths, we can clearly see the convergence of the mean. Also, the confidence region shows the correct growth.
End of explanation
"""
|
moustakas/hizea
|
doc/nb/massprofiles-sg.ipynb
|
gpl-2.0
|
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import fitsio
import astropy.units as u
from astropy.io import ascii
from astropy.table import Table
from astropy.cosmology import FlatLambdaCDM
%pylab inline
mpl.rcParams.update({'font.size': 18})
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
"""
Explanation: Stellar mass profiles based on sg_fluxtable
Preliminary stellar mass profiles of the HST sample based on the radial aperture photometry sg_fluxtable_nm.txt generated in July 2017 at Bates.
End of explanation
"""
massdir = os.path.join( os.getenv('HIZEA_PROJECT'), 'massprofiles', 'isedfit' )
etcdir = os.path.join( os.getenv('HIZEA_DIR'), 'etc' )
photfile = os.path.join(etcdir, 'sg_fluxtable_nm.txt')
isedfile = os.path.join(massdir, 'massprofiles_fsps_v2.4_miles_chab_charlot_sfhgrid01.fits.gz')
kcorrfile = os.path.join(massdir, 'massprofiles_fsps_v2.4_miles_chab_charlot_sfhgrid01_kcorr.z0.0.fits.gz')
print('Reading {}'.format(photfile))
phot = ascii.read(photfile)
phot[:2]
print('Reading {}'.format(isedfile))
ised = Table(fitsio.read(isedfile, ext=1, upper=True))
ised[:2]
print('Reading {}'.format(kcorrfile))
kcorr = Table(fitsio.read(kcorrfile, ext=1, upper=True))
kcorr[:2]
galaxy = [gg[:5] for gg in phot['ID'].data]
galaxy = np.unique(galaxy)
ngal = len(galaxy)
"""
Explanation: Read the original photometry, the fitting results, and the K-corrections.
End of explanation
"""
nrad = 40
radpix = np.linspace(1.0, 40.0, nrad) # [pixels]
radarcsec = radpix * 0.05 # [arcsec]
mstar = ised['MSTAR_AVG'].data.reshape(ngal, nrad)
mstar_err = ised['MSTAR_ERR'].data.reshape(ngal, nrad)
redshift = phot['z'].data.reshape(ngal, nrad)[:, 0]
area = np.pi * np.insert(np.diff(radarcsec**2), 0, radarcsec[0]**2) # aperture annulus [arcsec2]
sigma = np.zeros_like(mstar) # surface mass density [Mstar/kpc2]
radkpc = np.zeros_like(mstar) # radius [comoving kpc]
for igal in range(ngal):
arcsec2kpc = cosmo.arcsec_per_kpc_comoving(redshift[igal]).value
radkpc[igal, :] = radarcsec / arcsec2kpc
areakpc2 = area / arcsec2kpc**2
sigma[igal, :] = np.log10( 10**mstar[igal, :] / areakpc2 )
massrange = (8, 10.2)
sigmarange = (6, 9.6)
fig, ax = plt.subplots(3, 4, figsize=(14, 8), sharey=True, sharex=True)
for ii, thisax in enumerate(ax.flat):
thisax.errorbar(radarcsec, mstar[ii, :], yerr=mstar_err[ii, :],
label=galaxy[ii])
thisax.set_ylim(massrange)
#thisax.legend(loc='upper right', frameon=False)
thisax.annotate(galaxy[ii], xy=(0.9, 0.9), xycoords='axes fraction',
size=16, ha='right', va='top')
fig.text(0.0, 0.5, r'$\log_{10}\, (M / M_{\odot})$', ha='center',
va='center', rotation='vertical')
fig.text(0.5, 0.0, 'Radius (arcsec)', ha='center',
va='center')
fig.subplots_adjust(wspace=0.05, hspace=0.05)
fig.tight_layout()
fig, ax = plt.subplots(figsize=(10, 7))
for igal in range(ngal):
ax.plot(radkpc[igal, :], np.log10(np.cumsum(10**mstar[igal, :])), label=galaxy[igal])
ax.legend(loc='lower right', fontsize=16, ncol=3, frameon=False)
ax.set_xlabel(r'Galactocentric Radius $r_{kpc}$ (Comoving kpc)')
ax.set_ylabel(r'$\log_{10}\, M(<r_{kpc})\ (M_{\odot})$')
fig, ax = plt.subplots(figsize=(10, 7))
for igal in range(ngal):
ax.plot(radkpc[igal, :], sigma[igal, :], label=galaxy[igal])
ax.legend(loc='upper right', fontsize=16, ncol=3, frameon=False)
ax.set_xlabel(r'Galactocentric Radius $r_{kpc}$ (Comoving kpc)')
ax.set_ylabel(r'$\log_{10}\, \Sigma\ (M_{\odot}\ /\ {\rm kpc}^2)$')
ax.set_ylim(sigmarange)
"""
Explanation: Plot the individual stellar mass profiles.
Fluxes were measured in circular apertures with radii ranging from 1 to 40 pixels (0.05-2 arcsec). Below we calculate the surface mass density in units of Mstar per comoving kpc^2.
End of explanation
"""
|
GoogleCloudPlatform/tensorflow-without-a-phd
|
tensorflow-rnn-tutorial/old-school-tensorflow/tutorial/00_RNN_predictions_solution.ipynb
|
apache-2.0
|
import numpy as np
import utils_datagen
import utils_display
from matplotlib import pyplot as plt
import tensorflow as tf
print("Tensorflow version: " + tf.__version__)
"""
Explanation: An RNN for short-term predictions
This model will try to predict the next value in a short sequence based on historical data. This can be used for example to forecast demand based on a couple of weeks of sales data.
<div class="alert alert-block alert-warning">
This is the solution file. The corresponding tutorial file is [00_RNN_predictions_playground.ipynb](00_RNN_predictions_playground.ipynb)
</div>
<div class="alert alert-block alert-info">
**Assignment #5**: The [final cell](#benchmark) in this notebook has a loop that benchmarks all the neural network architectures. Run it once, then if you have the time, try reducing the data sequence length from 16 to 8 (SEQLEN=8) and see how well you can still predict the next value.
</div>
End of explanation
"""
DATA_SEQ_LEN = 1024*128
data = np.concatenate([utils_datagen.create_time_series(waveform, DATA_SEQ_LEN) for waveform in utils_datagen.Waveforms])
utils_display.picture_this_1(data, DATA_SEQ_LEN)
"""
Explanation: Generate fake dataset
End of explanation
"""
NB_EPOCHS = 10 # number of times the data is repeated during training
RNN_CELLSIZE = 32 # size of the RNN cells
SEQLEN = 16 # unrolled sequence length
BATCHSIZE = 32 # mini-batch size
"""
Explanation: Hyperparameters
End of explanation
"""
utils_display.picture_this_2(data, BATCHSIZE, SEQLEN) # execute multiple times to see different sample sequences
"""
Explanation: Visualize training sequences
This is what the neural network will see during training.
End of explanation
"""
# three simplistic predictive models: can you beat them?
def simplistic_models(X):
# "random" model
Yrnd = tf.random_uniform([tf.shape(X)[0]], -2.0, 2.0) # tf.shape(X)[0] is the batch size
# "same as last" model
Ysal = X[:,-1]
# "trend from last two" model
Ytfl = X[:,-1] + (X[:,-1] - X[:,-2])
return Yrnd, Ysal, Ytfl
# linear model (RMSE: 0.36, with shuffling: 0.17)
def linear_model(X):
Yout = tf.layers.dense(X, 1) # output shape [BATCHSIZE, 1]
return Yout
# 2-layer dense model (RMSE: 0.38, with shuffling: 0.15-0.18)
def DNN_model(X):
Y = tf.layers.dense(X, SEQLEN//2, activation=tf.nn.relu)
Yout = tf.layers.dense(Y, 1, activation=None) # output shape [BATCHSIZE, 1]
return Yout
# convolutional (RMSE: 0.31, with shuffling: 0.16)
def CNN_model(X):
X = tf.expand_dims(X, axis=2) # [BATCHSIZE, SEQLEN, 1] is necessary for conv model
Y = tf.layers.conv1d(X, filters=8, kernel_size=4, activation=tf.nn.relu, padding="same") # [BATCHSIZE, SEQLEN, 8]
    Y = tf.layers.conv1d(Y, filters=16, kernel_size=3, activation=tf.nn.relu, padding="same") # [BATCHSIZE, SEQLEN, 16]
Y = tf.layers.conv1d(Y, filters=8, kernel_size=1, activation=tf.nn.relu, padding="same") # [BATCHSIZE, SEQLEN, 8]
Y = tf.layers.max_pooling1d(Y, pool_size=2, strides=2) # [BATCHSIZE, SEQLEN//2, 8]
Y = tf.layers.conv1d(Y, filters=8, kernel_size=3, activation=tf.nn.relu, padding="same") # [BATCHSIZE, SEQLEN//2, 8]
Y = tf.layers.max_pooling1d(Y, pool_size=2, strides=2) # [BATCHSIZE, SEQLEN//4, 8]
# mis-using a conv layer as linear regression :-)
Yout = tf.layers.conv1d(Y, filters=1, kernel_size=SEQLEN//4, activation=None, padding="valid") # output shape [BATCHSIZE, 1, 1]
Yout = tf.squeeze(Yout, axis=-1) # output shape [BATCHSIZE, 1]
return Yout
"""
Explanation: The model definition
When executed, these functions instantiate the Tensorflow graph for our model.
End of explanation
"""
# RNN model (RMSE: 0.38, with shuffling 0.14, the same with loss on last 8)
def RNN_model(X, n=1):
# 2-layer RNN
X = tf.expand_dims(X, axis=2) # [BATCHSIZE, SEQLEN, 1] is necessary for RNN model
cell1 = tf.nn.rnn_cell.GRUCell(RNN_CELLSIZE)
cell2 = tf.nn.rnn_cell.GRUCell(RNN_CELLSIZE)
cell = tf.nn.rnn_cell.MultiRNNCell([cell1, cell2], state_is_tuple=False)
Yn, H = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32) # Yn [BATCHSIZE, SEQLEN, RNN_CELLSIZE]
# regression head
batchsize = tf.shape(X)[0]
Yn = tf.reshape(Yn, [batchsize*SEQLEN, RNN_CELLSIZE])
Yr = tf.layers.dense(Yn, 1) # Yr [BATCHSIZE*SEQLEN, 1]
Yr = tf.reshape(Yr, [batchsize, SEQLEN, 1]) # Yr [BATCHSIZE, SEQLEN, 1]
    # In this RNN model, you can compute the loss on the last predicted item or the last n predicted items
# Last n is slightly better.
Yout = Yr[:,-n:SEQLEN,:] # last item(s) in sequence: output shape [BATCHSIZE, n, 1]
Yout = tf.squeeze(Yout, axis=-1)
return Yout
def RNN_model_N(X): return RNN_model(X, n=SEQLEN//2)
def model_fn(features, labels, model):
X = features # shape [BATCHSIZE, SEQLEN]
Y = model(X)
last_label = labels[:, -1] # last item in sequence: the target value to predict
last_labels = labels[:, -tf.shape(Y)[1]:SEQLEN] # last p items in sequence (as many as in Y), useful for RNN_model(X, n>1)
loss = tf.losses.mean_squared_error(Y, last_labels) # loss computed on last label(s)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss)
Yrnd, Ysal, Ytfl = simplistic_models(X)
eval_metrics = {"RMSE": tf.sqrt(loss),
                    # compare against three simplistic predictive models: can you beat them?
"RMSErnd": tf.sqrt(tf.losses.mean_squared_error(Yrnd, last_label)),
"RMSEsal": tf.sqrt(tf.losses.mean_squared_error(Ysal, last_label)),
"RMSEtfl": tf.sqrt(tf.losses.mean_squared_error(Ytfl, last_label))}
Yout = Y[:,-1]
return Yout, loss, eval_metrics, train_op
"""
Explanation: <div style="text-align: right; font-family: monospace">
X shape [BATCHSIZE, SEQLEN, 1]<br/>
Y shape [BATCHSIZE, SEQLEN, 1]<br/>
H shape [BATCHSIZE, RNN_CELLSIZE*NLAYERS]
</div>
End of explanation
"""
# training to predict the same sequence shifted by one (next value)
labeldata = np.roll(data, -1)
# slice data into sequences
traindata = np.reshape(data, [-1, SEQLEN])
labeldata = np.reshape(labeldata, [-1, SEQLEN])
# also make an evaluation dataset by randomly subsampling our fake data
EVAL_SEQUENCES = DATA_SEQ_LEN*4//SEQLEN//4
joined_data = np.stack([traindata, labeldata], axis=1) # new shape is [N_sequences, 2(train/eval), SEQLEN]
joined_evaldata = joined_data[np.random.choice(joined_data.shape[0], EVAL_SEQUENCES, replace=False)]
evaldata = joined_evaldata[:,0,:]
evallabels = joined_evaldata[:,1,:]
def datasets(nb_epochs):
# Dataset API for batching, shuffling, repeating
dataset = tf.data.Dataset.from_tensor_slices((traindata, labeldata))
dataset = dataset.repeat(NB_EPOCHS)
dataset = dataset.shuffle(DATA_SEQ_LEN*4//SEQLEN) # important ! Number of sequences in shuffle buffer: all of them
dataset = dataset.batch(BATCHSIZE)
# Dataset API for batching
evaldataset = tf.data.Dataset.from_tensor_slices((evaldata, evallabels))
evaldataset = evaldataset.repeat()
evaldataset = evaldataset.batch(EVAL_SEQUENCES) # just one batch with everything
# Some boilerplate code...
# this creates a Tensorflow iterator of the correct type and shape
# compatible with both our training and eval datasets
tf_iter = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
# it can be initialized to iterate through the training dataset
dataset_init_op = tf_iter.make_initializer(dataset)
# or it can be initialized to iterate through the eval dataset
evaldataset_init_op = tf_iter.make_initializer(evaldataset)
# Returns the tensorflow nodes needed by our model_fn.
features, labels = tf_iter.get_next()
# When these nodes will be executed (sess.run) in the training or eval loop,
# they will output the next batch of data.
# Note: when you do not need to swap the dataset (like here between train/eval) just use
# features, labels = dataset.make_one_shot_iterator().get_next()
# TODO: easier with tf.estimator.inputs.numpy_input_fn ???
return features, labels, dataset_init_op, evaldataset_init_op
"""
Explanation: Prepare the training dataset
End of explanation
"""
tf.reset_default_graph() # restart model graph from scratch
# instantiate the dataset
features, labels, dataset_init_op, evaldataset_init_op = datasets(NB_EPOCHS)
# instantiate the model
Yout, loss, eval_metrics, train_op = model_fn(features, labels, RNN_model_N)
"""
Explanation: Instantiate the model
End of explanation
"""
# variable initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
"""
Explanation: Initialize Tensorflow session
This resets all neuron weights and biases to initial random values
End of explanation
"""
count = 0
losses = []
indices = []
sess.run(dataset_init_op)
while True:
try: loss_, _ = sess.run([loss, train_op])
except tf.errors.OutOfRangeError: break
# print progress
if count%300 == 0:
epoch = count // (DATA_SEQ_LEN*4//BATCHSIZE//SEQLEN)
print("epoch " + str(epoch) + ", batch " + str(count) + ", loss=" + str(loss_))
if count%10 == 0:
losses.append(np.mean(loss_))
indices.append(count)
count += 1
# final evaluation
sess.run(evaldataset_init_op)
eval_metrics_, Yout_ = sess.run([eval_metrics, Yout])
print("Final accuracy on eval dataset:")
print(str(eval_metrics_))
plt.ylim(ymax=np.amax(losses[1:])) # ignore first value(s) for scaling
plt.plot(indices, losses)
plt.show()
# execute multiple times to see different sample sequences
utils_display.picture_this_3(Yout_, evaldata, evallabels, SEQLEN)
"""
Explanation: The training loop
You can re-execute this cell to continue training
End of explanation
"""
NB_EPOCHS = 10
sess.close()
models = [linear_model, DNN_model, CNN_model, RNN_model, RNN_model_N]
for model in models:
# reset tensorflow graph: start from scratch
tf.reset_default_graph()
# instantiate the dataset
features, labels, dataset_init_op, evaldataset_init_op = datasets(NB_EPOCHS)
# instantiate model
Yout, loss, eval_metrics, train_op = model_fn(features, labels, model)
init = tf.global_variables_initializer()
with tf.Session() as sess:
# training loop
sess.run([init, dataset_init_op])
while True:
try: sess.run(train_op)
except tf.errors.OutOfRangeError: break
# evaluation
sess.run(evaldataset_init_op)
eval_metrics_ = sess.run(eval_metrics)
print(str(model))
print(str(eval_metrics_))
"""
Explanation: <a name="benchmark"></a>
Benchmark
Benchmark all the algorithms. This takes a while (approx. 10 min).
End of explanation
"""
|
conversationai/unintended-ml-bias-analysis
|
archive/unintended_ml_bias/metric_heatmap_example.ipynb
|
apache-2.0
|
model_bias_analysis.plot_auc_heatmap(madlibs_results, models)
"""
Explanation: AUC Heatmap
The heatmap below shows the three AUC-based metrics for two models. Each column is labeled with "MODEL_NAME"_"METRIC_NAME"
Metrics:
* <b>Subgroup AUC</b>: AUC of examples within the identity subgroup.
* <b>Negative Cross AUC</b>: AUC of negative (out of class, i.e. non-toxic) examples in the identity subgroup, and positive examples outside the identity subgroup.
* <b>Positive Cross AUC</b>: AUC of negative (out of class, i.e. non-toxic) examples outside the identity subgroup, and positive examples in the identity subgroup.
Values should range between 0.5 and 1.0; higher is better.
End of explanation
"""
model_bias_analysis.plot_aeg_heatmap(madlibs_results, models)
model_bias_analysis.plot_auc_heatmap(real_data_results, real_data_models)
model_bias_analysis.plot_aeg_heatmap(real_data_results, real_data_models)
"""
Explanation: AEG Heatmap
The heatmap below shows the two Average Equality Gap metrics for two models.
Metrics:
* <b>Negative AEG</b>: Measures the difference between the distributions of out-of-class examples within the subgroup and outside the subgroup.
* <b>Positive AEG</b>: Measures the difference between the distributions of in-class examples within the subgroup and outside the subgroup.
0 is the ideal value for this metric. Positive values indicate a skew towards higher scores; negative values indicate a skew towards lower scores.
End of explanation
"""
|
udapi/udapi-python
|
tutorial/01-visualizing.ipynb
|
gpl-3.0
|
!pip3 install --user --upgrade git+https://github.com/udapi/udapi-python.git
"""
Explanation: Introduction
Udapi is an API and framework for processing Universal Dependencies. In this tutorial, we will focus on the Python version of Udapi. Perl and Java versions are available as well, but they are missing some of the features.
Udapi can be used from the shell (e.g. Bash), using the wrapper script udapy. It can also be used as a library, from Python, IPython or Jupyter notebooks. We will show both of these ways below.
This tutorial uses Details sections for extra info (if you want to know more or if you run into problems). You need to click on them to show their content.
<details><summary>Details</summary>
It is a substitute for footnotes. The content may be long and showing it in the main text may be distracting.
</details>
Install (upgrade) Udapi
First, make sure you have the newest version of Udapi. If you have already installed Udapi using git clone, just run git pull. If you have not installed Udapi yet, run
<details><summary>Details</summary>
<ul>
<li> The command below installs Udapi from GitHub (from the master branch). With <code>pip3 install --user --upgrade udapi</code>, you can install the last version released on PyPI (possibly older).
<li> The exclamation mark (!) in Jupyter or IPython means that the following command will be executed by the system shell (e.g. Bash).
</ul>
</details>
End of explanation
"""
!udapy -h
"""
Explanation: Now, make sure you can run the command-line interface udapy, e.g. by printing the help message.
End of explanation
"""
!wget http://ufal.mff.cuni.cz/~popel/udapi/ud20sample.tgz
!tar -xf ud20sample.tgz
%cd sample
"""
Explanation: <details><summary>Details: If the previous command fails with "udapy: command not found"</summary>
This means that Udapi is not properly installed. When installing Udapi with <code>pip3 --user</code>, it is installed into <code>~/.local/lib/python3.6/site-packages/udapi/</code> (or similar depending on your Python version) and the wrapper into <code>~/.local/bin</code>. Thus you need to
<pre>
export PATH="$HOME/.local/bin/:$PATH"
</pre>
</details>
Browse CoNLL-U files
Get sample UD data
Download and extract ud20sample.tgz. There are just 100 sentences for each of the 70 treebanks (sample.conllu), plus 4 bigger files (train.conllu and dev.conllu) for German, English, French and Czech. For full UD (2.0 or newer), go to Lindat.
End of explanation
"""
cat UD_Ancient_Greek/sample.conllu | head
"""
Explanation: Let's choose one of the sample files and see the raw CoNLL-U format.
<details><summary>Details: executing from Bash, IPython, Jupyter</summary>
<ul>
<li>If you see "No such file or directory" error, make sure you executed the previous cell. Note that the <code>cd</code> command is not prefixed by an exclamation mark because that would run in a sub-shell, which "forgets" the changed directory when finished. It is prefixed by a percent sign, which marks it as <a href="https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd">IPython magic</a>.
<li><code>cat</code> is another IPython magic command, this time an alias for the shell command of the same name (so you can prefix <code>cat</code> with an exclamation mark, if you prefer), which prints a given file. With <code>automagic</code> on, you can use it without the percent sign.
<li>In this tutorial, we use <code>| head</code> to show just the first 10 lines of the output (preventing thus big ipynb file size). You can ignore the "cat: write error: Broken pipe" warning.
<li>When using Jupyter, you can omit the <code>| head</code> because long outputs are automatically wrapped in a text box with a scrollbar.
<li>When running this from IPython or Bash, you can use a pager: <code>less UD_Ancient_Greek/sample.conllu</code>
</ul>
</details>
End of explanation
"""
cat UD_Ancient_Greek/sample.conllu | udapy -T | head -n 20
"""
Explanation: Browse conllu files with udapy -T
While the CoNLL-U format was designed with readability (by both machines and humans) in mind, it may still be a bit difficult for humans to read and interpret. Let's visualize the dependency tree structure using ASCII-art by piping the conllu file into udapy -T.
End of explanation
"""
cat UD_Ancient_Greek/sample.conllu | udapy -q write.TextModeTrees color=1 layout=align attributes=form,lemma,upos,deprel,misc | head -n 20
"""
Explanation: <details><summary>Details:</summary>
<ul>
<li>You may be used to see dependency trees where the root node is on the top and words are ordered horizontally (left to right). Here, the root is on left and words are ordered vertically (top to bottom).
<li>The colors are implemented using the <a href="https://pypi.org/project/colorama/">colorama package</a> and ANSI escape codes. When running this from IPython or Bash and using <code>less</code>, you need to instruct it to display the colors with <code>-R</code>:
<code>
cat UD_Ancient_Greek/sample.conllu | udapy -T | less -R
</code>
<li>You can also use <code>udapy -T -N</code> to disable the colors.
<li><code>udapy -q</code> suppresses all Udapi messages (warnings, info, debug) printed on the standard error output, so only fatal errors are printed. By default only debug messages are suppressed, but these can be printed with <code>udapy -v</code>.
<li>But you already know this because you have read <code>udapy -h</code>, am I right?
</ul>
</details>
udapy -T is a shortcut for udapy write.TextModeTrees color=1, where write.TextModeTrees is a so-called block (a basic Udapi processing unit) and color=1 is its parameter. See the documentation (or even the source code of write.TextModeTrees) to learn about further parameters. Now, let's also print the LEMMA and MISC columns and display the columns vertically aligned using the parameters layout=align attributes=form,lemma,upos,deprel,misc.
End of explanation
"""
import udapi
doc = udapi.Document("UD_English/sample.conllu")
doc[5].draw()
"""
Explanation: Browse conllu files from IPython/Jupyter
So far, we were using Udapi only via its command-line interface udapy, which is handy, but not very Pythonic. So let's now use Udapi as a library and load the English conllu sample file into a document doc and visualize the sixth tree (i.e. doc[5] in zero-based indexing).
End of explanation
"""
doc[5].draw(layout="align", attributes="ord,form,feats")
"""
Explanation: <details><summary>Details:</summary>
<ul>
<li><code>doc = udapi.Document(filename)</code> is a shortcut for
<pre>
import udapi.core.document
doc = udapi.core.document.Document(filename)
</pre>
<li>We can print the whole document using <code>doc.draw()</code>.
<li><code>doc.draw(**kwargs)</code> is a shortcut for creating a <code>write.TextModeTrees</code> block and applying it on the document:
<pre>
import udapi.block.write.textmodetrees
block = udapi.block.write.textmodetrees.TextModeTrees(**kwargs)
block.run(doc)
</pre>
</ul>
</details>
The draw() method takes the same parameters as the write.TextModeTrees block, so we can for example display only the node ID (aka ord, i.e. word-order index), form and universal (morpho-syntactic) features.
End of explanation
"""
prepositions, postpositions = 0, 0
# Iterate over all nodes in the document (in all trees)
for node in doc.nodes:
if node.upos == "ADP":
# TODO: fix this code to actually distinguish prepositions and postpositions
prepositions += 1
# Print the results
prepositions, postpositions
"""
Explanation: Document representation in Udapi
A Udapi document consists of a sequence of so-called bundles, mirroring the sequence of sentences in a typical natural language text.
A bundle corresponds to a sentence,
possibly in multiple versions or with different representations, such as sentence-tuples from parallel corpora, or paraphrases in the same language or alternative analyses (e.g. parses produced by different parsers). If there are more trees in a bundle, they must be distinguished by a so-called zone (a label which contains the language code).
Each tree is represented by a special (artificial) root node, which is added to the top of a CoNLL-U tree in the Udapi model. The root node bears the ID of a given tree/sentence (sent_id) and its word order (ord) is 0. Technically, Root is subclass of Node, with some extra methods.
The Node class corresponds to a node
of a dependency tree. It provides access to all the CoNLL-U-defined attributes (ord, form, lemma, upos, xpos, feats, deprel, deps, misc). There are methods for tree traversal (parent, root, children, descendants); word-order traversal (next_node, prev_node); tree manipulation (parent setter) including word-order changes (shift_after_node(x), shift_before_subtree(x), etc.); and utility methods: is_descendant_of(x), is_nonprojective(), precedes(x), is_leaf(), is_root(), get_attrs([]), compute_text(), draw().
Exercise 1: Count prepositions and postpositions
Prepositions and postpositions are together called adpositions and assigned the ADP universal part-of-speech tag (upos) in UD. Some languages (e.g. English) use mostly prepositions, others mostly postpositions.
* Do you know any English postpositions?
* Guess the typical adposition type (i.e. whether a given language uses more prepositions or postpositions) for at least 10 languages of your choice (from those in UD2.0).
* Complete the following code and find out how many prepositions and postpositions are in UD_English/sample.conllu (which has been loaded into doc).
End of explanation
"""
# For the statistics, you may find useful: count["any string"] += 1
import collections
count = collections.Counter()
big_doc = udapi.Document("UD_English/train.conllu")
for node in doc.nodes:
# TODO detect postposition
pass
# Print the statistics
count.most_common()
"""
Explanation: If you don't know how to proceed click on the following hints.
<details><summary>Hint 1:</summary>
In some dependency grammars, adpositions govern nouns (i.e. the adposition is the *parent* of a given noun node). In other dependency grammars, adpositions depend on nouns (i.e. the noun is the *parent* of a given adposition). Find out which style is being used by UD. Check <a href="https://universaldependencies.org/u/overview/syntax.html">the UD documentation</a> or inspect some of the tree visualizations and guess.
</details>
<details><summary>Hint 2:</summary>
See the <a href="https://udapi.readthedocs.io/en/latest/udapi.core.html#module-udapi.core.node">Node documentation</a> and find out how to obtain dependency parent and dependency children. Note that these are properties of a given node, rather than methods, so you should not write parentheses () after the property name.
</details>
<details><summary>Hint 3:</summary>
<code>doc.nodes</code> iterates over all nodes in the document sorted by the word order, but this would be cumbersome to exploit. Find a method of <code>Node</code> to detect the relative word order of two nodes (within the same tree/sentence).
</details>
<details><summary>Hint 4:</summary>
Use <code>node.parent</code> and <code>node.precedes(another_node)</code>.
The latter is a shortcut for <code>node.ord < another_node.ord</code>.
</details>
<details><summary>Solution:</summary>
<pre>
for node in doc.nodes:
if node.upos == "ADP":
if node.precedes(node.parent):
prepositions += 1
else:
postpositions += 1
</pre>
</details>
Exercise 2: Explore English postpositions
The previous exercise indicates there are 7 occurrences of postpositions in the English sample. Find these 7 occurrences and visualize them using node.draw(). Count which adpositions (lemma) with which dependency relations (deprel) are responsible for these occurrences. Recompute these statistics on the bigger English training data. Can you explain these occurrences? What are the reasons? Is any occurrence an annotation error?
End of explanation
"""
|
GoogleCloudPlatform/ml-design-patterns
|
07_responsible_ai/heuristic_benchmark.ipynb
|
apache-2.0
|
%%bigquery
SELECT
bqutil.fn.median(ARRAY_AGG(TIMESTAMP_DIFF(a.creation_date, q.creation_date, SECOND))) AS time_to_answer
FROM `bigquery-public-data.stackoverflow.posts_questions` q
JOIN `bigquery-public-data.stackoverflow.posts_answers` a
ON q.accepted_answer_id = a.id
"""
Explanation: Heuristic Benchmark
This notebook demonstrates the Heuristic Benchmark design pattern
1. Regression on poorly understood features
Problem: Time interval before a question on Stack Overflow is answered.
Benchmark: Median time to first answer over the entire training dataset, so 2120 seconds.
End of explanation
"""
%%bigquery
WITH benchmark_eval AS (
SELECT
2120 - TIMESTAMP_DIFF(a.creation_date, q.creation_date, SECOND) AS error
FROM `bigquery-public-data.stackoverflow.posts_questions` q
JOIN `bigquery-public-data.stackoverflow.posts_answers` a
ON q.accepted_answer_id = a.id
)
SELECT
AVG(ABS(error)) AS mean_absolute_error
FROM
benchmark_eval
"""
Explanation: Find the error metric of always predicting that it will take 2120 seconds to get an answer. This is the baseline metric against which to report model performance.
End of explanation
"""
%%bigquery
SELECT
AVG(IF(a.last_edit_date IS NULL, 0, 1)) AS prob_edited
FROM `bigquery-public-data.stackoverflow.posts_questions` q
JOIN `bigquery-public-data.stackoverflow.posts_answers` a
ON q.accepted_answer_id = a.id
"""
Explanation: 2. Classification on poorly understood features
Problem: Whether or not an accepted answer will be edited.
Benchmark: Probability distribution of accepted answers that are edited.
End of explanation
"""
%%bigquery
SELECT
COUNTIF(ENDS_WITH(u.location, 'France')) / COUNT(u.location) AS from_france,
COUNTIF(ENDS_WITH(u.location, 'India')) / COUNT(u.location) AS from_india
FROM `bigquery-public-data.stackoverflow.posts_questions` q
JOIN `bigquery-public-data.stackoverflow.posts_answers` a
ON q.accepted_answer_id = a.id
JOIN `bigquery-public-data.stackoverflow.users` u
ON u.id = a.owner_user_id
"""
Explanation: Problem: Country from which a Stack Overflow question will be answered.
Benchmark: Fractions of answers written by people from France, India, and so on.
End of explanation
"""
%%bigquery
With trips AS (
SELECT
total_amount,
ST_Distance(ST_GeogPoint(pickup_longitude, pickup_latitude),
ST_GeogPoint(dropoff_longitude, dropoff_latitude))/1000 AS dist
FROM `bigquery-public-data.new_york.tlc_yellow_trips_2015`
WHERE pickup_latitude BETWEEN 35 and 45
AND dropoff_latitude BETWEEN 35 and 45
AND pickup_longitude BETWEEN -80 and -70
AND dropoff_longitude BETWEEN -80 and -70
AND total_amount IS NOT NULL
)
SELECT AVG(total_amount)/AVG(dist)
FROM trips
"""
Explanation: 3. Regression with one good numeric feature
Problem: Predict taxi fare amount given pickup and dropoff locations.
The distance between the two points is, intuitively, a key feature.
Benchmark: linear regression based on this feature
End of explanation
"""
%%bigquery
CREATE TEMPORARY FUNCTION is_peak_hour(start_date TIMESTAMP) AS
(EXTRACT(DAYOFWEEK FROM start_date) BETWEEN 2 AND 6 -- weekday
AND (
EXTRACT(HOUR FROM start_date) BETWEEN 6 AND 10
OR
EXTRACT(HOUR FROM start_date) BETWEEN 15 AND 18))
;
SELECT
start_station_name,
is_peak_hour(start_date) AS is_peak,
AVG(duration) AS predicted_duration,
FROM `bigquery-public-data.london_bicycles.cycle_hire`
GROUP BY 1, 2
ORDER BY predicted_duration DESC
LIMIT 10
"""
Explanation: 4. Regression with one or two important features
Problem: Predict duration of bicycle rental.
Benchmark: Lookup table
End of explanation
"""
%%bigquery
CREATE TEMPORARY FUNCTION is_peak_hour(start_date TIMESTAMP) AS
(EXTRACT(DAYOFWEEK FROM start_date) BETWEEN 2 AND 6 -- weekday
AND (
EXTRACT(HOUR FROM start_date) BETWEEN 6 AND 10
OR
EXTRACT(HOUR FROM start_date) BETWEEN 15 AND 18))
;
WITH benchmark AS (
SELECT
start_station_name,
is_peak_hour(start_date) AS is_peak,
AVG(duration) AS predicted_duration,
FROM `bigquery-public-data.london_bicycles.cycle_hire`
GROUP BY 1, 2
)
SELECT
SQRT( SUM( (duration - predicted_duration)*(duration - predicted_duration)) / COUNT(duration) ) AS rmse
FROM `bigquery-public-data.london_bicycles.cycle_hire` c
JOIN benchmark b
ON c.start_station_name = b.start_station_name AND is_peak_hour(c.start_date) = b.is_peak
"""
Explanation: Now, use this benchmark to compute the overall RMSE, so that you can compare with the model.
End of explanation
"""
|
rashikaranpuria/Machine-Learning-Specialization
|
Regression/Assignment_four/week-4-ridge-regression-assignment-2-blank.ipynb
|
mit
|
import graphlab
"""
Explanation: Regression Week 4: Ridge Regression (gradient descent)
In this notebook, you will implement ridge regression via gradient descent. You will:
* Convert an SFrame into a Numpy array
* Write a Numpy function to compute the derivative of the regression weights with respect to a single feature
* Write gradient descent function to compute the regression weights given an initial weight vector, step size, tolerance, and L2 penalty
Fire up graphlab create
Make sure you have the latest version of GraphLab Create (>= 1.7)
End of explanation
"""
sales = graphlab.SFrame('kc_house_data.gl/kc_house_data.gl')
"""
Explanation: Load in house sales data
Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
End of explanation
"""
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe['price']
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
"""
Explanation: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.
Import useful functions from previous notebook
As in Week 2, we convert the SFrame into a 2D Numpy array. Copy and paste get_num_data() from the second notebook of Week 2.
End of explanation
"""
def predict_output(feature_matrix, weights):
# assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
# create the predictions vector by using np.dot()
predictions = np.dot(feature_matrix, weights)
return(predictions)
"""
Explanation: Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights:
End of explanation
"""
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
# If feature_is_constant is True, derivative is twice the dot product of errors and feature
if(feature_is_constant):
derivative = 2*np.dot(errors,feature)
# Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight
else:
derivative = 2*np.dot(errors,feature) + 2*l2_penalty*weight
return derivative
"""
Explanation: Computing the Derivative
We are now going to move to computing the derivative of the regression cost function. Recall that the cost function is the sum over the data points of the squared difference between an observed output and a predicted output, plus the L2 penalty term.
Cost(w)
= SUM[ (prediction - output)^2 ]
+ l2_penalty*(w[0]^2 + w[1]^2 + ... + w[k]^2).
Since the derivative of a sum is the sum of the derivatives, we can take the derivative of the first part (the RSS) as we did in the notebook for the unregularized case in Week 2 and add the derivative of the regularization part. As we saw, the derivative of the RSS with respect to w[i] can be written as:
2*SUM[ error*[feature_i] ].
The derivative of the regularization term with respect to w[i] is:
2*l2_penalty*w[i].
Summing both, we get
2*SUM[ error*[feature_i] ] + 2*l2_penalty*w[i].
That is, the derivative for the weight for feature i is the sum (over data points) of 2 times the product of the error and the feature itself, plus 2*l2_penalty*w[i].
We will not regularize the constant. Thus, in the case of the constant, the derivative is just twice the sum of the errors (without the 2*l2_penalty*w[0] term).
Recall that twice the sum of the product of two vectors is just twice the dot product of the two vectors. Therefore the derivative for the weight for feature_i is just two times the dot product between the values of feature_i and the current errors, plus 2*l2_penalty*w[i].
With this in mind, complete the following derivative function, which computes the derivative of the weight given the value of the feature (over all data points) and the errors (over all data points). To decide when we are dealing with the constant (so we don't regularize it), we added the extra parameter feature_is_constant to the call, which you should set to True when computing the derivative of the constant and False otherwise.
End of explanation
"""
import numpy as np
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.])
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False)
print np.sum(errors*example_features[:,1])*2+20.
print ''
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True)
print np.sum(errors)*2.
"""
Explanation: To test your feature derivative, run the following:
End of explanation
"""
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations):
weights = np.array(initial_weights) # make sure it's a numpy array
iters = 0
#while not reached maximum number of iterations:
while iters < max_iterations:
# compute the predictions based on feature_matrix and weights using your predict_output() function
predictions = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = predictions - output
for i in xrange(len(weights)): # loop over each weight
# Recall that feature_matrix[:,i] is the feature column associated with weights[i]
# compute the derivative for weight[i].
#(Remember: when i=0, you are computing the derivative of the constant!)
if i == 0:
derivative = feature_derivative_ridge(errors, feature_matrix[:,0], weights[0], l2_penalty, True)
else:
derivative = feature_derivative_ridge(errors, feature_matrix[:,i], weights[i], l2_penalty, False)
weights[i] = weights[i] - step_size*derivative
# subtract the step size times the derivative from the current weight
iters = iters + 1
return weights
"""
Explanation: Gradient Descent
Now we will write a function that performs a gradient descent. The basic premise is simple. Given a starting point we update the current weights by moving in the negative gradient direction. Recall that the gradient is the direction of increase and therefore the negative gradient is the direction of decrease and we're trying to minimize a cost function.
The amount by which we move in the negative gradient direction is called the 'step size'. We stop when we are 'sufficiently close' to the optimum. Unlike in Week 2, this time we will set a maximum number of iterations and take gradient steps until we reach this maximum number. If no maximum number is supplied, the maximum should be set to 100 by default. (Use default parameter values in Python.)
With this in mind, complete the following gradient descent function below using your derivative function above. For each step in the gradient descent, we update the weight for each feature before computing our stopping criteria.
End of explanation
"""
simple_features = ['sqft_living']
my_output = 'price'
"""
Explanation: Visualizing effect of L2 penalty
The L2 penalty gets its name because it causes weights to have smaller L2 norms than they would otherwise. Let's see how large weights get penalized. Let us consider a simple model with 1 feature:
End of explanation
"""
train_data,test_data = sales.random_split(.8,seed=0)
"""
Explanation: Let us split the dataset into training set and test set. Make sure to use seed=0:
End of explanation
"""
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
"""
Explanation: In this part, we will only use 'sqft_living' to predict 'price'. Use the get_numpy_data function to get a Numpy versions of your data with only this feature, for both the train_data and the test_data.
End of explanation
"""
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
"""
Explanation: Let's set the parameters for our optimization:
End of explanation
"""
l2_penalty = 0.0
simple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
print simple_weights_0_penalty
"""
Explanation: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights:
simple_weights_0_penalty
we'll use them later.
End of explanation
"""
l2_penalty = 1e11
simple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
print simple_weights_high_penalty
"""
Explanation: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights:
simple_weights_high_penalty
we'll use them later.
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
"""
Explanation: This code will plot the two learned models. (The blue line is for the model with no regularization and the red line is for the one with high regularization.)
End of explanation
"""
train_data,test_data = sales.random_split(.8,seed=0)
model_features = ['sqft_living']
my_output = 'price'
(test_simple_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
test_predictions = predict_output(test_simple_feature_matrix, simple_weights_high_penalty)
rss = 0
for i in range(0, len(test_predictions)):
error = test_predictions[i] - test_data['price'][i]
rss += error * error
print rss
"""
Explanation: Compute the RSS on the TEST data for the following three sets of weights:
1. The initial weights (all zeros)
2. The weights learned with no regularization
3. The weights learned with high regularization
Which weights perform best?
End of explanation
"""
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
"""
Explanation: QUIZ QUESTIONS
1. What is the value of the coefficient for sqft_living that you learned with no regularization, rounded to 1 decimal place? What about the one with high regularization?
2. Comparing the lines you fit with no regularization versus high regularization, which one is steeper?
3. What are the RSS on the test data for each of the set of weights above (initial, no regularization, high regularization)?
Running a multiple regression with L2 penalty
Let us now consider a model with 2 features: ['sqft_living', 'sqft_living15'].
First, create Numpy versions of your training and test data with these two features.
End of explanation
"""
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
"""
Explanation: We need to re-initialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
End of explanation
"""
l2_penalty = 0.0
multiple_weights_0_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
print multiple_weights_0_penalty
"""
Explanation: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights:
multiple_weights_0_penalty
End of explanation
"""
l2_penalty = 1e11
multiple_weights_high_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
print multiple_weights_high_penalty
"""
Explanation: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights:
multiple_weights_high_penalty
End of explanation
"""
train_data,test_data = sales.random_split(.8,seed=0)
model_features = ['sqft_living','sqft_living15']
my_output = 'price'
(test_simple_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
test_predictions = predict_output(test_simple_feature_matrix, multiple_weights_0_penalty)
rss = 0
for i in range(0, len(test_predictions)):
error = test_predictions[i] - test_data['price'][i]
rss += error * error
print rss
"""
Explanation: Compute the RSS on the TEST data for the following three sets of weights:
1. The initial weights (all zeros)
2. The weights learned with no regularization
3. The weights learned with high regularization
Which weights perform best?
End of explanation
"""
|
Cyianor/smc2017
|
solutions/code/Python/fheld/exIV.ipynb
|
mit
|
import numpy as np
from scipy import stats
from tqdm import tqdm_notebook
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style()
"""
Explanation: SMC2017: Exercise sheet IV
Setup
End of explanation
"""
T = 50
xs_sim = np.zeros((T + 1,))
ys_sim = np.zeros((T,))
# Initial state
xs_sim[0] = 0.
for t in range(T):
xs_sim[t + 1] = np.cos(xs_sim[t]) + stats.norm.rvs()
ys_sim = xs_sim[1:] + stats.norm.rvs(0, 1, T)
fig, axs = plt.subplots(2, 1, figsize=(10, 10))
axs[0].plot(xs_sim, 'o-')
axs[1].plot(range(1, T + 1), ys_sim, 'o-r')
"""
Explanation: IV.1 Particle Metropolis-Hastings
Consider the state-space model
$$
\begin{array}{rcll}
x_t & = & \cos\left(\theta x_{t - 1}\right) + v_t, &\qquad v_t \sim \mathcal{N}(0, 1)\
y_t & = & x_t + e_t, &\qquad e_t \sim \mathcal{N}(0, 1) \
x_0 & \sim & \mathcal{N}(0, 1) &
\end{array}
$$
which admits the probabilistic model
$$
\begin{array}{lcl}
p(x_0) & = & \mathcal{N}\left(x_0;\,0,\,1\right) \
p(x_t\,\big|\,x_{t - 1}) & = & \mathcal{N}\left(x_t;\,\cos\left(\theta x_{t - 1}\right),\,1\right) \
p(y_t\,\big|\,x_t) & = & \mathcal{N}\left(y_t;\,x_t,\,1\right)
\end{array}
$$
For now, I will use the bootstrap particle filter (for simplicity).
Simulate data
During the simulation, $\theta = 1$ is used. For the inference, a prior $\theta \sim \mathcal{N}(0, 1)$ is assumed.
End of explanation
"""
def log_likelihood_bootstrap_pf(y, N=20, theta=1):
# Cumulatively build up log-likelihood
ll = 0.0
# Initialisation
samples = stats.norm.rvs(0, 1, N)
weights = 1 / N * np.ones((N,))
# Determine the number of time steps
T = len(y)
# Loop through all time steps
for t in range(T):
# Resample
ancestors = np.random.choice(samples, size=N,
replace=True, p=weights)
# Propagate
samples = stats.norm.rvs(0, 1, N) + np.cos(theta * ancestors)
# Weight
weights = stats.norm.logpdf(y[t], loc=samples, scale=1)
# Calculate the max of the weights
max_weights = np.max(weights)
# Subtract the max
weights = weights - max_weights
# Update log-likelihood
ll += max_weights + np.log(np.sum(np.exp(weights))) - np.log(N)
# Normalize weights to be probabilities
weights = np.exp(weights) / np.sum(np.exp(weights))
return ll
log_likelihood_bootstrap_pf(ys_sim, N=50, theta=3)
"""
Explanation: Bootstrap particle filter giving an estimate $\widehat{z}_\theta$ of the joint likelihood $p(y_{1:T}\,\big|\,\theta)$.
End of explanation
"""
def particle_metropolis_hastings(y, M=10000, N=20, tau=1):
theta = np.zeros((M + 1,))
alpha = np.zeros((M,))
z = np.zeros((M + 1,))
# Initial state
theta[0] = 0
z[0] = log_likelihood_bootstrap_pf(y, N=N, theta=theta[0])
# Iterate the chain
t = tqdm_notebook(range(M))
for i in t:
# Sample a new value
theta_prop = stats.norm.rvs(theta[i], tau, 1)
# Sample to be compared to the acceptance probability
u = stats.uniform.rvs()
# Terms in the second part of the acceptance probability -
# Proposal is symmetric, so terms containing the proposal will
# cancel each other out
z_prop = log_likelihood_bootstrap_pf(y, N=N, theta=theta_prop)
num = z_prop + stats.norm.logpdf(theta_prop)
denom = z[i] + stats.norm.logpdf(theta[i])
# Acceptance probability
alpha[i] = min(1, np.exp(num - denom))
t.set_postfix({'a_mean': np.mean(alpha[:(i + 1)])})
# Set next state depending on acceptance probability
if u <= alpha[i]:
z[i + 1] = z_prop
theta[i + 1] = theta_prop
else:
z[i + 1] = z[i]
theta[i + 1] = theta[i]
return theta, alpha
theta, alpha = particle_metropolis_hastings(ys_sim, M=10000, N=50, tau=0.7)
np.mean(alpha)
fig, ax = plt.subplots()
ax.plot(theta, '.-')
fig, ax = plt.subplots()
ax.hist(theta[2000:], normed=True, bins=60);
"""
Explanation: As a proposal we can use $q(\theta'\,\big|\,\theta[k - 1]) = \mathcal{N}\left(\theta';\,\theta[k - 1], \tau\right)$ with an appropriately chosen $\tau$.
Implement a Metropolis-Hastings sampler with the above.
End of explanation
"""
def conditional_FAPF(x_ref, y, N=200):
# Determine length of data
T = len(y)
# Save the paths of all final particles
xs = np.zeros((N, T + 1))
# Initialisation
xs[:, 0] = stats.norm.rvs(0, 1, N)
# Replace last state with state from reference trajectory
xs[N - 1, 0] = x_ref[0]
for t in range(T):
# Calculate resampling weights in case of FAPF
ws = stats.norm.logpdf(y[t], loc=2*np.power(np.cos(xs[:, t]), 2),
scale=np.sqrt(4.01))
# Subtract maximum weight
ws -= np.max(ws)
# Normalize the resampling weights
ws = np.exp(ws) / np.sum(np.exp(ws))
# Resample
ancestors = np.random.choice(range(N), size=N, replace=True, p=ws)
# Propagate
xs[:, t + 1] = stats.norm.rvs(0, 1, N) * 0.1 / np.sqrt(4.01) + \
(2 / 4.01) * y[t] + (0.01 / 4.01) * \
np.power(np.cos(xs[ancestors, t]), 2)
# Replace last sample with reference trajectory
ancestors[N - 1] = N - 1
xs[N - 1, t + 1] = x_ref[t + 1]
# Update the ancestor lines
xs[:, 0:t] = xs[ancestors, 0:t]
# Randomly choose trajectory which will be returned
# All normalized weights are 1 / N, so that no draw from
# a categorical distribution is necessary. A uniform draw
# is satisfactory.
b = np.random.randint(N)
return xs[b, :]
"""
Explanation: IV.2 Conditional Particle Filter
I will turn the fully adapted particle filter from exercise II.2 into a conditional particle filter by including a reference state trajectory; in each propagation step, the reference trajectory supplies one of the particles. States and their ancestors are saved, and the algorithm returns a new state trajectory conditional on the old one.
The state-space model under consideration is (normal distribution parametrized with $\sigma$)
$$
\begin{array}{rll}
x_{t + 1} &= \cos(x_t)^2 + v_t, & v_t \sim N(0, 1) \
y_t &= 2 x_t + e_t, & e_t \sim N(0, 0.1)
\end{array}
$$
which leads to the probabilistic model
$$
\begin{align}
p(x_t\,|\,x_{t - 1}) &= N\left(x_t;\,\cos(x_t)^2,\,1\right) \
p(y_t\,|\,x_t) &= N\left(y_t;\,2 x_t,\,0.1\right)
\end{align}
$$
This admits the necessary pdfs
$$
\begin{align}
p(y_t\,|\,x_{t - 1}) &= N(y_t;\,2 \cos(x_{t - 1})^2,\,\sqrt{4.01}) \
p(x_t\,|\,x_{t - 1},\,y_t) &= N\left(x_t;\,\frac{2 y_t + 0.01 \cos(x_{t - 1})^2}{4.01}, \frac{0.1}{\sqrt{4.01}}\right)
\end{align}
$$
End of explanation
"""
T = 100
# Allocate arrays for results
ys_sim = np.zeros((T,))
xs_sim = np.zeros((T + 1,))
# Initial value for state
xs_sim[0] = 0.1
# Walk through all time steps
for t in range(T):
xs_sim[t + 1] = np.power(np.cos(xs_sim[t]), 2) + stats.norm.rvs(0, 1, 1)
ys_sim[t] = 2 * xs_sim[t + 1] + stats.norm.rvs(0, 0.1, 1)
fig, axs = plt.subplots(2, 1, figsize=(10, 10))
axs[0].plot(range(T + 1), xs_sim, 'o-');
axs[1].plot(range(1, T + 1), ys_sim, 'o-r');
"""
Explanation: Simulate from the model given above.
End of explanation
"""
xs = conditional_FAPF(xs_sim, ys_sim, N=1000)
fig, ax = plt.subplots()
ax.plot(xs_sim, 'o-')
ax.plot(xs, 'x-');
"""
Explanation: This is a Markov kernel which can be used in Gibbs sampling, where the parameters and the hidden states are sampled in alternation.
End of explanation
"""
def cond_imp_sampling_kernel(x, N=2):
# Sample new proposals
xs = stats.norm.rvs(0, 1, N)
# Set the last sample to the reference
xs[N - 1] = x
# Calculate weights
ws = stats.norm.logpdf(xs, loc=1, scale=1) - \
stats.norm.logpdf(xs, loc=0, scale=1)
ws -= np.max(ws)
ws = np.exp(ws) / np.sum(np.exp(ws))
return xs[np.random.choice(range(N), size=1, p=ws)[0]]
"""
Explanation: IV.3 Conditional importance sampling
a) Conditional importance sampling with few particles
Sample from $\pi(x) = \mathcal{N}\left(x\,\big|\,1,\,1\right)$ by using conditional importance sampling with the proposal $q(x) = \mathcal{N}\left(x\,\big|\,0,\,1\right)$.
End of explanation
"""
def cond_imp_sampling_mcmc(M=1000, N=2):
# Initialisation
xs = np.zeros((M + 1,))
for m in tqdm_notebook(range(M)):
xs[m + 1] = cond_imp_sampling_kernel(xs[m], N=N)
return xs
"""
Explanation: Use that kernel to sample from the target distribution.
End of explanation
"""
xs = cond_imp_sampling_mcmc(M=70000)
"""
Explanation: Run the sampler
End of explanation
"""
fig, ax = plt.subplots()
ax.hist(xs, normed=True, bins=40);
"""
Explanation: Plot the result
End of explanation
"""
M = 50
x0 = np.array([6.0, -5.5])
ns = np.reshape(stats.expon.rvs(scale=2, size=2 * M), (2, M))
bs = np.reshape(np.random.choice([-1, 1], size=2 * M,
replace=True, p=[0.5, 0.5]),
(2, M))
ys = np.reshape(np.repeat(x0, M), (2, M)) + ns * bs
ys = ys.T
"""
Explanation: b) Lower bound for probability that draw from cond. imp. sampling kernel falls in a set $A$
Theoretical exercise. Solution will be in exercises_on_paper.
IV.4 An SMC sampler for localization
A point $x_0$ is supposed to be localized in the plane $[-12,\,12]^2$.
There are some measurements $y_{1:M}$ which are corrupted by heavy-tailed noise from an exponential distribution.
We want to find the distribution $p\left(x_0\,\big|\,y_{1:M}\right)$.
a) Simulate data
$M$ simulated measurements from the model
$$
\begin{align}
y_t^1 &= x_0^1 + n_m^1 b_m^1 \
y_t^2 &= x_0^2 + n_m^2 b_m^2
\end{align}
$$
where
$$
\begin{align}
m &= 1, 2, \dots, M \
x_0 &= \left(x_0^1, x_0^2\right) \
n_m^1, n_m^2 &\sim \mathrm{Exp}\left(2\right) \
\mathbb{P}\left(b_m^1 = 1\right) &= \mathbb{P}\left(b_m^1 = -1\right) = \frac{1}{2}
\end{align}
$$
and analogously for $b_m^2$.
End of explanation
"""
fig, ax = plt.subplots(figsize=(8, 8))
ax.scatter(ys[:, 0], ys[:, 1])
ax.set_xlim([-12, 12])
ax.set_ylim([-12, 12])
ax.scatter(x0[0], x0[1], facecolors='none', edgecolors='r', s=100)
"""
Explanation: Visualize simulated observations and true $x_0$
End of explanation
"""
def log_likelihood(x, ys):
return np.sum(np.log(0.25) + 0.5 *
np.power(-1, ((ys - x) > 0).astype('int')) * (ys - x))
"""
Explanation: b) Likelihood
As derived on paper, it holds that
$$
p\left(y_m^j\,\big|\,x_0^j\right) =
\begin{cases}
\frac{1}{4} \exp\left(-\frac{y_m^j - x_0^j}{2}\right) & y_m^j > x_0^j \
\frac{1}{4} \exp\left(\frac{y_m^j - x_0^j}{2}\right) & y_m^j < x_0^j
\end{cases}
$$
and since the components of $y_m$ are independent we get
$$
p\left(y_m\,\big|\,x_0\right) = p\left(y_m^1\,\big|\,x_0^1\right) \cdot p\left(y_m^2\,\big|\,x_0^2\right)
$$
End of explanation
"""
def tempered_logpdf(x, ys, k, K=10):
# k / K comes from likelihood tempering
return k / K * log_likelihood(x, ys) + \
stats.multivariate_normal.logpdf(x, mean=[0, 0],
cov=7 * np.eye(2))
"""
Explanation: c) Metropolis-Hastings kernel for $\pi_k$
This function evaluates $\log\left(\pi_k\right)$
End of explanation
"""
def mh_kernel(x, ys, k, K=10, tau=0.5):
# Propose a new value
x_prop = stats.multivariate_normal.rvs(mean=x,
cov=tau**2 * np.eye(2),
size=1)
# Terms in the second part of the acceptance probability
# Proposal is symmetric, so terms containing the proposal will
# cancel each other out
# Acceptance probability
alpha = min(0, tempered_logpdf(x_prop, ys, k, K=K) -
tempered_logpdf(x, ys, k, K=K))
# Sample to be compared to the acceptance probability
u = stats.uniform.rvs()
# Set next state depending on acceptance probability
if np.log(u) <= alpha:
return x_prop, np.exp(alpha)
else:
return x, np.exp(alpha)
mh_kernel(x0, ys, 2)
"""
Explanation: The Metropolis-Hastings kernel produces one new sample of the Markov chain, conditional on the last sample.
End of explanation
"""
def smc_sampler(ys, K=10, N=100, ess_min=50, tau=0.5, progressbar=True):
# Vectors for saving
xs = np.zeros((K + 1, N, 2))
ancs = np.zeros((K, N), dtype='int64')
ws = np.zeros((K + 1, N))
# Initialisation
xs[0, :, :] = stats.multivariate_normal.rvs(mean=[0, 0],
cov=7 * np.eye(2),
size=N)
ws[0, :] = 1 / N * np.ones((N,))
if progressbar:
t = tqdm_notebook(range(K))
else:
t = range(K)
for k in t:
# Update weights
for i in range(N):
ws[k + 1, i] = np.log(ws[k, i]) + \
tempered_logpdf(xs[k, i, :], ys, k=k + 1, K=K) - \
tempered_logpdf(xs[k, i, :], ys, k=k, K=K)
# and normalize them
ws[k + 1, :] -= np.max(ws[k + 1, :])
ws[k + 1, :] = np.exp(ws[k + 1, :]) / np.sum(np.exp(ws[k + 1, :]))
# Resample depending on ESS
if 1 / np.sum(np.power(ws[k + 1, :], 2)) < ess_min:
ancs[k, :] = np.random.choice(range(N), size=N,
replace=True, p=ws[k + 1, :])
ws[k + 1, :] = 1 / N * np.ones((N,))
else:
ancs[k, :] = range(N)
# Propagate / Sample from next element in the sequence
# Here, via a Metropolis-Hastings kernel
for i in range(N):
xs[k + 1, i, :] = mh_kernel(xs[k, ancs[k, i], :], ys,
k=k + 1, K=K, tau=tau)[0]
return xs, ancs, ws
xs, ancs, ws = smc_sampler(ys, N=1000, ess_min=750)
np.sum(xs[10, :, 0] * ws[10])
np.sum(xs[10, :, 1] * ws[10])
"""
Explanation: e) Putting together the actual SMC sampler
End of explanation
"""
x = np.arange(-12, 12, 0.25)
y = np.arange(-12, 12, 0.25)
X, Y = np.meshgrid(x, y)
Z = np.zeros((len(x), len(y), 10))
for k in tqdm_notebook(range(10)):
for i in range(len(x)):
for j in range(len(y)):
Z[i, j, k] = tempered_logpdf(np.array([X[i, j], Y[i, j]]),
ys, k, K=10)
Z[:, :, k] -= np.max(Z[:, :, k])
Z[:, :, k] = np.exp(Z[:, :, k])
fig, axs = plt.subplots(5, 2, figsize=(8.5, 20))
for k in range(10):
    levels = np.linspace(np.min(Z[:, :, k]),
                         np.max(Z[:, :, k]), 8)
    axs[k // 2, k % 2].contour(X, Y, Z[:, :, k], levels)
axs[k // 2, k % 2].scatter(x0[0], x0[1],
facecolors='none', edgecolors='r', s=100)
axs[k // 2, k % 2].scatter(xs[k, :, 0], xs[k, :, 1], color='k')
fig.tight_layout()
"""
Explanation: f) Visualisation and testing of the SMC sampling
Evaluate the sequence of tempered distributions on a grid to draw contour lines, and overlay the particles from each SMC iteration.
End of explanation
"""
def mh_sampler(ys, k=10, K=10, M=1000, tau=0.5, progressbar=True):
# Prepare vectors for saving
xs = np.zeros((M + 1, 2))
alpha = np.zeros((M,))
# Initial state
# Choose zero as the initial state
# Iterate the chain
if progressbar:
t = tqdm_notebook(range(M))
else:
t = range(M)
for i in t:
xs[i + 1], alpha[i] = mh_kernel(xs[i], ys, k, K=K, tau=tau)
if progressbar:
t.set_postfix({'mean acc': np.mean(alpha[:(i + 1)])})
return xs, alpha
xs, _ = mh_sampler(ys, M=30000, tau=0.7, progressbar=True)
"""
Explanation: g) Comparison to a standard Metropolis-Hastings sampler
This is a plain Metropolis-Hastings sampler targeting $\pi_k$; with the default $k = K = 10$ it samples directly from the full posterior.
End of explanation
"""
fig, axs = plt.subplots(2, 1, figsize=(8, 6))
burnin = 500
axs[0].hist(xs[burnin:, 0], normed=True, bins=50);
axs[0].axvline(np.mean(xs[burnin:, 0]), color='r', linestyle='--')
axs[0].axvline(np.median(xs[burnin:, 0]), color='k', linestyle='--')
axs[1].hist(xs[burnin:, 1], normed=True, bins=50);
axs[1].axvline(np.mean(xs[burnin:, 1]), color='r', linestyle='--')
axs[1].axvline(np.median(xs[burnin:, 1]), color='k', linestyle='--')
means_mh = np.zeros((10, 2))
means_smc = np.zeros((10, 2))
for m in tqdm_notebook(range(10)):
xs, _ = mh_sampler(ys, M=25000, tau=0.7, progressbar=True)
means_mh[m, :] = np.mean(xs[500:], axis=0)
xs, _, ws = smc_sampler(ys, N=2000, ess_min=1500, progressbar=True)
means_smc[m, :] = [np.sum(xs[10, :, 0] * ws[10]),
np.sum(xs[10, :, 1] * ws[10])]
np.mean(np.linalg.norm(means_smc - x0, axis=1, ord=1))
np.mean(np.linalg.norm(means_mh - x0, axis=1, ord=1))
"""
Explanation: Some visualisations of the marginal distributions of the two coordinates, estimated from the Metropolis-Hastings run. Both samplers are then each run ten times and the mean L1 error of their posterior-mean estimates with respect to the true $x_0$ is compared.
End of explanation
"""
|
mtasende/Machine-Learning-Nanodegree-Capstone
|
notebooks/prod/.ipynb_checkpoints/n10_dyna_q_with_predictor_full_training_dyna1-checkpoint.ipynb
|
mit
|
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
import pickle
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent_predictor import AgentPredictor
from functools import partial
from sklearn.externals import joblib
NUM_THREADS = 1
LOOKBACK = -1
STARTING_DAYS_AHEAD = 252
POSSIBLE_FRACTIONS = [0.0, 1.0]
DYNA = 20
BASE_DAYS = 112
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
estimator_close = joblib.load('../../data/best_predictor.pkl')
estimator_volume = joblib.load('../../data/best_volume_predictor.pkl')
agents = [AgentPredictor(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.999,
dyna_iterations=DYNA,
name='Agent_{}'.format(i),
estimator_close=estimator_close,
estimator_volume=estimator_volume,
env=env,
prediction_window=BASE_DAYS) for i in index]
def show_results(results_list, data_in_df, graph=False):
for values in results_list:
total_value = values.sum(axis=1)
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
print('-'*100)
initial_date = total_value.index[0]
compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
compare_results.name = SYMBOL
compare_results_df = pd.DataFrame(compare_results)
compare_results_df['portfolio'] = total_value
std_comp_df = compare_results_df / compare_results_df.iloc[0]
if graph:
plt.figure()
std_comp_df.plot()
"""
Explanation: In this notebook, a Q-learner with Dyna and a custom predictor is trained and evaluated. The Q-learner recommends when to buy or sell shares of one particular stock, and in which quantity (in fact, it determines the desired fraction of shares in the total portfolio value).
End of explanation
"""
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
other_env=env)
show_results([results_list], data_in_df, graph=True)
import pickle
with open('../../data/dyna_q_with_predictor.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
"""
Explanation: Let's show the symbol's data, to see how good the recommender has to be.
End of explanation
"""
TEST_DAYS_AHEAD = 112
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
"""
Explanation: Let's run the trained agent on the test set.
First, a non-learning test: this scenario is worse than what is actually achievable, because the Q-learner could keep learning from past samples in the test set without compromising causality.
End of explanation
"""
TEST_DAYS_AHEAD = 112
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
"""
Explanation: And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
End of explanation
"""
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_test_df['Close'].iloc[TEST_DAYS_AHEAD:]))))  # start at the same day as the test simulations above
"""
Explanation: What are the metrics for "holding the position"?
End of explanation
"""
|
AndreySheka/dl_ekb
|
hw1/Homework 1 (Face Recognition).ipynb
|
mit
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
image_h, image_w = 32, 32
data = scipy.io.loadmat('faces_data.mat')
X_train = data['train_faces'].reshape((image_w, image_h, -1)).transpose((2, 1, 0)).reshape((-1, image_h * image_w))
y_train = data['train_labels'] - 1
X_test = data['test_faces'].reshape((image_w, image_h, -1)).transpose((2, 1, 0)).reshape((-1, image_h * image_w))
y_test = data['test_labels'] - 1
n_features = X_train.shape[1]
n_train = len(y_train)
n_test = len(y_test)
n_classes = len(np.unique(y_train))
print('Dataset loaded.')
print(' Image size : {}x{}'.format(image_h, image_w))
print(' Train images : {}'.format(n_train))
print(' Test images : {}'.format(n_test))
print(' Number of classes : {}'.format(n_classes))
"""
Explanation: Face recognition
The goal of this seminar is to build two simple (and very similar) face recognition pipelines using the scikit-learn package. Overall, we'd like to explore different representations and see which one works better.
Prepare dataset
End of explanation
"""
def plot_gallery(images, titles, h, w, n_row=3, n_col=6):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.5 * n_col, 1.7 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray, interpolation='nearest')
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
titles = [str(y[0]) for y in y_train]
plot_gallery(X_train, titles, image_h, image_w)
"""
Explanation: Now we are going to plot some samples from the dataset using the provided helper function.
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
# Use KNeighborsClassifier to calculate test score for the Nearest Neighbour classifier.
# One possible solution (1-NN on raw pixel intensities):
clf = KNeighborsClassifier(n_neighbors=1).fit(X_train, y_train.ravel())
test_score = clf.score(X_test, y_test.ravel())
print('Test score: {}'.format(test_score))
"""
Explanation: Nearest Neighbour baseline
The simplest way to do face recognition is to treat raw pixels as features and perform nearest-neighbour search in Euclidean space. Let's use the KNeighborsClassifier class.
End of explanation
"""
# Populate variable 'X_train_processed' with samples each of which has zero mean and unit variance.
X_train_processed = (X_train - X_train.mean(axis=1, keepdims=True)) / X_train.std(axis=1, keepdims=True)  # one possible solution
"""
Explanation: Not very impressive, is it?
Eigenfaces
All the dirty work will be done by the scikit-learn package. First we need to learn a dictionary of codewords. For that, we preprocess the training set by normalizing each face to zero mean and unit variance.
End of explanation
"""
from sklearn.decomposition import RandomizedPCA
n_components = 64
# Populate 'pca' with a trained instance of RandomizedPCA. One possible solution:
pca = RandomizedPCA(n_components=n_components).fit(X_train_processed)
"""
Explanation: Now we are going to apply PCA to obtain a dictionary of codewords.
RandomizedPCA class is what we need.
End of explanation
"""
# Visualize principal components.
plot_gallery(pca.components_, ['PC {}'.format(i + 1) for i in range(18)], image_h, image_w)  # one possible way
"""
Explanation: We plot a bunch of principal components.
End of explanation
"""
# Transform training data and plot decomposition coefficients.
X_train_pca = pca.transform(X_train_processed)  # one possible solution; e.g. inspect with plt.imshow(X_train_pca, aspect='auto')
"""
Explanation: This time there is no restriction on the number of non-zero coefficients in the decomposition, so the codes are not sparse anymore:
End of explanation
"""
from sklearn.svm import SVC  # one possible solution to this exercise follows
X_test_processed = (X_test - X_test.mean(axis=1, keepdims=True)) / X_test.std(axis=1, keepdims=True)
test_score = SVC(kernel='linear').fit(X_train_pca, y_train.ravel()).score(pca.transform(X_test_processed), y_test.ravel())
print('Test score: {}'.format(test_score))
"""
Explanation: Train an SVM and apply it to the encoded test data.
End of explanation
"""
n_components = [1, 2, 4, 8, 16, 32, 64]
accuracy = []
# Try different numbers of components and populate 'accuracy' list.
# One possible solution: refit the PCA and the SVM for each dictionary size.
for n in n_components:
    pca_n = RandomizedPCA(n_components=n).fit(X_train_processed)
    clf_n = SVC(kernel='linear').fit(pca_n.transform(X_train_processed), y_train.ravel())
    accuracy.append(clf_n.score(pca_n.transform(X_test_processed), y_test.ravel()))
plt.figure(figsize=(10, 6))
plt.plot(n_components, accuracy)
print('Max accuracy: {}'.format(max(accuracy)))
"""
Explanation: How many components are sufficient to reach the same accuracy level?
End of explanation
"""
|