#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plotly.com/python/getting-started/) by downloading the client and [reading the primer](https://plotly.com/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plotly.com/python/getting-started/#initialization-for-online-plotting) or [offline](https://plotly.com/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plotly.com/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Plotly's Python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
```
import plotly
plotly.__version__
```
### Basic Sankey Diagram
```
import plotly.plotly as py
data = dict(
type='sankey',
node = dict(
pad = 15,
thickness = 20,
line = dict(
color = "black",
width = 0.5
),
label = ["A1", "A2", "B1", "B2", "C1", "C2"],
color = ["blue", "blue", "blue", "blue", "blue", "blue"]
),
link = dict(
source = [0,1,0,2,3,3],
target = [2,3,3,4,4,5],
value = [8,4,2,8,4,2]
))
layout = dict(
title = "Basic Sankey Diagram",
font = dict(
size = 10
)
)
fig = dict(data=[data], layout=layout)
py.iplot(fig, validate=False)
```
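The example above uses the legacy `plotly.plotly` online interface. If a recent Plotly release (4.x or later) is installed instead, the same figure can be built offline with `plotly.graph_objects`; the snippet below is a minimal sketch under that assumption, not part of the original notebook.
```
import plotly.graph_objects as go

# Same nodes and links as above, rendered with the modern offline API.
fig = go.Figure(go.Sankey(
    node=dict(pad=15, thickness=20,
              line=dict(color="black", width=0.5),
              label=["A1", "A2", "B1", "B2", "C1", "C2"],
              color=["blue", "blue", "blue", "blue", "blue", "blue"]),
    link=dict(source=[0, 1, 0, 2, 3, 3],
              target=[2, 3, 3, 4, 4, 5],
              value=[8, 4, 2, 8, 4, 2])))
fig.update_layout(title_text="Basic Sankey Diagram", font_size=10)
fig.show()
```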
### Create Sankey Canvas
```
import plotly.plotly as py
data = dict(
type='sankey',
domain = dict(
x = [0,1],
y = [0,1]
),
orientation = "h",
valueformat = ".0f",
valuesuffix = "TWh"
)
layout = dict(
title = "Energy forecast for 2050<br>Source: Department of Energy & Climate Change, Tom Counsell via <a href='https://bost.ocks.org/mike/sankey/'>Mike Bostock</a>",
font = dict(
size = 10
)
)
```
### Add Nodes
```
import plotly.plotly as py
import urllib.request, json
url = 'https://raw.githubusercontent.com/plotly/plotly.js/master/test/image/mocks/sankey_energy.json'
response = urllib.request.urlopen(url)
data = json.loads(response.read())
data_trace = dict(
type='sankey',
domain = dict(
x = [0,1],
y = [0,1]
),
orientation = "h",
valueformat = ".0f",
valuesuffix = "TWh",
node = dict(
pad = 15,
thickness = 15,
line = dict(
color = "black",
width = 0.5
),
label = data['data'][0]['node']['label'],
color = data['data'][0]['node']['color']
)
)
layout = dict(
title = "Energy forecast for 2050<br>Source: Department of Energy & Climate Change, Tom Counsell via <a href='https://bost.ocks.org/mike/sankey/'>Mike Bostock</a>",
font = dict(
size = 10
)
)
```
### Add Links
```
import plotly.plotly as py
import urllib.request, json
url = 'https://raw.githubusercontent.com/plotly/plotly.js/master/test/image/mocks/sankey_energy.json'
response = urllib.request.urlopen(url)
data = json.loads(response.read())
data_trace = dict(
type='sankey',
width = 1118,
height = 772,
domain = dict(
x = [0,1],
y = [0,1]
),
orientation = "h",
valueformat = ".0f",
valuesuffix = "TWh",
node = dict(
pad = 15,
thickness = 15,
line = dict(
color = "black",
width = 0.5
),
label = data['data'][0]['node']['label'],
color = data['data'][0]['node']['color']
),
link = dict(
source = data['data'][0]['link']['source'],
target = data['data'][0]['link']['target'],
value = data['data'][0]['link']['value'],
label = data['data'][0]['link']['label']
))
layout = dict(
title = "Energy forecast for 2050<br>Source: Department of Energy & Climate Change, Tom Counsell via <a href='https://bost.ocks.org/mike/sankey/'>Mike Bostock</a>",
font = dict(
size = 10
)
)
fig = dict(data=[data_trace], layout=layout)
py.iplot(fig, validate=False)
```
### Style Sankey Diagram
```
import plotly.plotly as py
import urllib.request, json
url = 'https://raw.githubusercontent.com/plotly/plotly.js/master/test/image/mocks/sankey_energy_dark.json'
response = urllib.request.urlopen(url)
data = json.loads(response.read())
data_trace = dict(
type='sankey',
width = 1118,
height = 772,
domain = dict(
x = [0,1],
y = [0,1]
),
orientation = "h",
valueformat = ".0f",
valuesuffix = "TWh",
node = dict(
pad = 15,
thickness = 15,
line = dict(
color = "black",
width = 0.5
),
label = data['data'][0]['node']['label']
),
link = dict(
source = data['data'][0]['link']['source'],
target = data['data'][0]['link']['target'],
value = data['data'][0]['link']['value'],
label = data['data'][0]['link']['label']
))
layout = dict(
title = "Energy forecast for 2050<br>Source: Department of Energy & Climate Change, Tom Counsell via <a href='https://bost.ocks.org/mike/sankey/'>Mike Bostock</a>",
font = dict(
size = 10,
color = 'white'
),
plot_bgcolor = 'black',
paper_bgcolor = 'black'
)
fig = dict(data=[data_trace], layout=layout)
py.iplot(fig, validate = False)
```
### Reference
See [https://plotly.com/python/reference/#sankey](https://plotly.com/python/reference/#sankey) for more information and options!
```
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'sankey.ipynb', 'python/sankey-diagram/', 'Sankey Diagram',
'How to make Sankey Diagrams in Python with Plotly.',
title = 'Sankey Diagram | Plotly',
has_thumbnail='true', thumbnail='thumbnail/sankey.jpg',
language='python',
display_as='basic', order=11,
ipynb= '~notebook_demo/151')
```
---
```
#test string
test = 'This is my test text. We are keeping this text to keep this manageable.'
def count_words(text):
'''Count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts'''
word_counts = {}
for word in text.split(" "):
#known word
if word in word_counts:
word_counts[word] +=1
#unknown word
else:
word_counts[word] = 1
return word_counts
count_words(test)
def count_words(text):
'''Count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts. Skips punctuation'''
#lower case letters
text = text.lower()
#skip punctuation
skips = ['.', ':', ';', "'", '"']
for ch in skips:
text = text.replace(ch, "")
word_counts = {}
for word in text.split(" "):
#known word
if word in word_counts:
word_counts[word] +=1
#unknown word
else:
word_counts[word] = 1
return word_counts
count_words(test)
from collections import Counter
def count_words_fast(text):
'''Count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts. Skips punctuation'''
#lower case letters
text = text.lower()
#skip punctuation
skips = ['.', ':', ';', "'", '"']
for ch in skips:
text = text.replace(ch, "")
word_counts = Counter(text.split(' '))
return word_counts
count_words_fast(test)
count_words_fast(test) == count_words(test)
len(count_words("This comprehension check is to check for comprehension."))
text = 'This comprehension check is to check for comprehension.'
count_words(text) is count_words_fast(text)
text = 'This comprehension check is to check for comprehension.'
count_words(text) == count_words_fast(text)
```
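`count_words_fast` exists mainly for speed. A quick way to check the difference (a sketch using the standard `timeit` module; `long_text` is just the test string repeated so the gap is measurable) is:
```
import timeit

# Repeat the short test string so the timing difference becomes visible.
long_text = test * 1000
t_loop = timeit.timeit(lambda: count_words(long_text), number=10)
t_counter = timeit.timeit(lambda: count_words_fast(long_text), number=10)
print("dict loop: %.3f s, Counter: %.3f s" % (t_loop, t_counter))
```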
#### Introduction to Language Processing: Question 1
What is Project Gutenberg?
- **An online repository of publicly available books in many languages.**
- An online repository of electronically-scanned microfiche copies of the original works of Martin Luther.
- An online translation service that can be used for any text file, including entire books.
#### Counting Words: Question 1
The function ```count_words``` is as defined in Video 3.2.2.
Consider the following code:
```len(count_words("This comprehension check is to check for comprehension."))```
What does this return?
- 5
- **6**
- 7
- 8
#### Counting Words: Question 2
The functions ```count_words``` and ```count_words_fast``` are as defined in Video 3.2.2. Consider the following code:
```count_words(text) is count_words_fast(text)```
What does this return?
- True
- **False**
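The reason is that `count_words` returns a plain `dict` while `count_words_fast` returns a `collections.Counter`: `==` compares their contents, whereas `is` checks whether both names refer to the same object. A small standalone illustration (not part of the course code):
```
from collections import Counter

a = {"check": 2, "comprehension": 2}
b = Counter({"check": 2, "comprehension": 2})
print(a == b)   # True  -> same keys and counts
print(a is b)   # False -> two distinct objects in memory
```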
---
```
'''
RL to capture a hidden flag
'''
import numpy as np
import matplotlib.pyplot as plt
# Grid environment
COL = 10 ## grid width; tested with 6, 10, 12 (15 failed until MAX_STEPS was added, after which 15/20/50 also work)
ROW = 10 ## grid height
NUM_STATES = COL * ROW
NUM_ACTIONS = 4 # up down left right
FLAG_STATE = 13 # preset flag location. Must be less than COL * ROW
REWARD_FAIL = -10
REWARD_SUCCCESS = 100
## convert state to row and column
def getStateToRC(state):
    r = state // COL  ## row index
    c = state % COL   ## column index
    return r, c
## get state from row and column
def getRCToState(r, c):
return int(r*COL + c)
## step function
def xp_step(state, action):
    r, c = getStateToRC(state)
    if (r > 0 and action == 0): ## 4 actions: up down left right
        r -= 1
    if (r < ROW-1 and action == 1):
        r += 1
    if (c > 0 and action == 2):
        c -= 1
    if (c < COL-1 and action == 3):
        c += 1
    state_new = getRCToState(r, c)
    done = False
    reward = REWARD_FAIL
    if (state_new == FLAG_STATE): ## fixed flag location
        done = True
        reward = REWARD_SUCCCESS
    return state_new, reward, done
env = []
MAX_STEPS = 50*(COL + ROW)/2 # max tries in one episode
# Define Q-learning function
def QLearning(env, learning, discount, epsilon, min_eps, episodes):
    # Initialize Q table with small random values
    Q = np.random.uniform(low = -1, high = 1,
                          size = (NUM_STATES, NUM_ACTIONS))
    # Initialize variables to track rewards
    reward_list = []
    avg_reward_list = []
    # Calculate episodic decay in epsilon
    decay = (epsilon - min_eps)/episodes
    # Run Q learning algorithm
    for i in range(episodes):
        # Initialize parameters
        done = False
        total_reward, reward = 0, 0
        state = np.random.randint(0, NUM_STATES) # random start state (np.random.randint excludes the upper bound)
        j = 0
        while done != True:
            j += 1
            if j > MAX_STEPS:
                break
            # Determine the next action - epsilon greedy strategy
            if np.random.random() < 1 - epsilon:
                action = np.argmax(Q[state])
            else:
                action = np.random.randint(0, NUM_ACTIONS) # exclusive upper bound, so every action can be explored
            # Get next state and reward
            state_new, reward, done = xp_step(state, action)
            # Allow for terminal states
            if done:
                Q[state, action] = reward
                total_reward += reward
                break
            # Adjust Q value for current state
            else:
                delta = learning * (reward + discount * np.max(Q[state_new]) - Q[state, action])
                Q[state, action] += delta
            # Update variables
            total_reward += reward
            state = state_new
        # Decay epsilon
        if epsilon > min_eps:
            epsilon -= decay
        # Track rewards
        reward_list.append(total_reward)
        if (i+1) % 100 == 0: # sample rewards
            avg_reward = np.mean(reward_list)
            avg_reward_list.append(avg_reward)
            reward_list = []
        if (i+1) % 100 == 0:
            print('Episode {} Average Reward: {}'.format(i+1, avg_reward))
    # env.close()
    return avg_reward_list, Q
# Run Q-learning algorithm
rewards, Q = QLearning(env, 0.2, 0.9, 0.8, 0, 10000)
# Plot Rewards
plt.plot(100*(np.arange(len(rewards)) + 1), rewards)
plt.xlabel('Episodes')
plt.ylabel('Average Reward')
plt.title('Average Reward vs Episodes')
# plt.savefig('rewards.jpg')
# plt.close()
print (Q)
## See a smart agent
##
for i in range(5):
state = np.random.randint(0, NUM_STATES -1) # random
print(f"{i}::Start:{state}")
for j in range(200):
action = np.argmax(Q[state])
print(f"{j}::action:{action}")
state, reward, done = xp_step(state, action)
end = state
if done:
print(f"Reward:{reward} at {end}")
break
```
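The heart of the loop above is the tabular Q-learning update Q(s, a) ← Q(s, a) + α·(r + γ·max Q(s′, ·) − Q(s, a)). A tiny standalone illustration of a single update step, with made-up numbers, is shown below.
```
import numpy as np

# One hand-worked Q-learning update with made-up values.
alpha, gamma = 0.2, 0.9          # learning rate and discount, as in QLearning(env, 0.2, 0.9, ...)
Q = np.zeros((2, 4))             # tiny 2-state, 4-action table
Q[1] = [1.0, 5.0, 2.0, 0.0]      # pretend values for the next state
state, action, reward, state_new = 0, 3, -10, 1

delta = alpha * (reward + gamma * np.max(Q[state_new]) - Q[state, action])
Q[state, action] += delta        # 0.2 * (-10 + 0.9*5.0 - 0.0) = -1.1
print(Q[state, action])          # -1.1
```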
---
```
# Step 7: Additional Catalog Corrections
# TO DO: Check which filters are present; we want all info regardless of whether all filters are present.
#Merge Each Filter into One Catalog
#Try only keeping things where errors are good.
by_filter['UVM2'] = by_filter['UVM2'][by_filter['UVM2'].UVM2_MAG_ERR < 0.35]
by_filter['UVW1'] = by_filter['UVW1'][by_filter['UVW1'].UVW1_MAG_ERR < 0.35]
by_filter['UVW2'] = by_filter['UVW2'][by_filter['UVW2'].UVW2_MAG_ERR < 0.35]
#Original Stuff:
first_two = pd.merge(by_filter['UVM2'],by_filter['UVW2'],on=['Ra','Dec'],how='outer')
catalog = pd.merge(first_two,by_filter['UVW1'],on=['Ra','Dec'],how='outer')
# Remove duplicate optical photometry
catalog = catalog.drop(labels=['Umag_y', 'e_Umag_y', 'Bmag_y', 'e_Bmag_y', 'Vmag_y',
'e_Vmag_y', 'Imag_y', 'e_Imag_y', 'Flag_y', 'Jmag_y',
'e_Jmag_y','Hmag_y', 'e_Hmag_y', 'Ksmag_y', 'e_Ksmag_y',
'Umag_x', 'e_Umag_x', 'Bmag_x', 'e_Bmag_x', 'Vmag_x',
'e_Vmag_x', 'Imag_x', 'e_Imag_x', 'Flag_x', 'Jmag_x',
'e_Jmag_x','Hmag_x', 'e_Hmag_x', 'Ksmag_x', 'e_Ksmag_x'],
axis=1)
# Might as well not keep things with no photometry
catalog = catalog[(np.isfinite(catalog.UVM2_RA)) | (np.isfinite(catalog.UVW1_RA)) | (np.isfinite(catalog.UVW2_RA)) ]
catalog.to_csv(f"{path}/{observation_id}/{observation_id}_1_1.csv")
# Step 8 Plot Tractor Results
f,axes=plt.subplots(3,3,figsize=(20,20))
fontsize = 20
for i,h in enumerate(hdu):
uvfilter = h.header['FILTER']
data = TractorObjects[uvfilter].image
model = TractorObjects[uvfilter].model
residual = data - model
axes[i,0].imshow(data,vmin=vmin(data),vmax=vmax(data),origin="lower")
axes[i,1].imshow(model,vmin=vmin(data),vmax=vmax(data),origin="lower")
axes[i,2].imshow(residual,vmin=vmin(residual),vmax=vmax(residual),origin="lower")
axes[i,0].set_ylabel(uvfilter,fontsize=fontsize)
plt.savefig(f"{path}/{observation_id}/{observation_id}_1_1_tractor.png")
#plt.close()
# Step 9 Plot Photometry
#plt.style.use('bmh')
g,axs=plt.subplots(2,4,figsize=(30,20))
uvm2_v = catalog[(catalog.UVM2_MAG_ERR < 0.35) & (catalog.UVM2_SATURATED == False) & (catalog.UVM2_SSS == 1.0) & (catalog.UVM2_EDGE == 1.0) & (catalog.e_Vmag < 0.35)]
uvw1_v = catalog[(catalog.UVW1_MAG_ERR < 0.35) & (catalog.UVW1_SATURATED == False) & (catalog.UVW1_SSS == 1.0) & (catalog.UVW1_EDGE == 1.0) & (catalog.e_Vmag < 0.35)]
uvw2_v = catalog[(catalog.UVW2_MAG_ERR < 0.35) & (catalog.UVW2_SATURATED == False) & (catalog.UVW2_SSS == 1.0) & (catalog.UVW2_EDGE == 1.0) & (catalog.e_Vmag < 0.35)]
uvm2_uvw1 = catalog[(catalog.UVM2_MAG_ERR < 0.35) & (catalog.UVM2_SATURATED == False) & (catalog.UVM2_SSS == 1.0) &
(catalog.UVM2_EDGE == 1.0) & (catalog.UVW1_MAG_ERR < 0.35) & (catalog.UVW1_SATURATED == False) &
(catalog.UVW1_SSS == 1.0) & (catalog.UVW1_EDGE == 1.0)]
axs[0,0].scatter(uvw1_v.UVW1_MAG - uvw1_v.Vmag,uvw1_v.UVW1_MAG)
axs[0,1].scatter(uvm2_v.UVM2_MAG - uvm2_v.Vmag,uvm2_v.UVM2_MAG )
axs[0,2].scatter(uvw2_v.UVW2_MAG - uvw2_v.Vmag,uvw2_v.UVW2_MAG)
axs[0,3].scatter(uvm2_uvw1.UVM2_MAG - uvm2_uvw1.UVW1_MAG,uvm2_uvw1.UVM2_MAG)
axs[1,0].scatter(uvw1_v.UVW1_MAG - uvw1_v.Vmag,uvw1_v.Vmag)
axs[1,1].scatter(uvm2_v.UVM2_MAG - uvm2_v.Vmag,uvm2_v.Vmag )
axs[1,2].scatter(uvw2_v.UVW2_MAG - uvw2_v.Vmag,uvw2_v.Vmag)
axs[1,3].scatter(uvm2_uvw1.UVM2_MAG - uvm2_uvw1.UVW1_MAG,uvm2_uvw1.UVW1_MAG)
axs[0,0].set_xlabel("UVW1 - V",fontsize=fontsize); axs[0,0].set_ylabel("UVW1",fontsize=fontsize);
axs[0,1].set_xlabel("UVM2 - V",fontsize=fontsize); axs[0,1].set_ylabel("UVM2",fontsize=fontsize);
axs[0,2].set_xlabel("UVW2 - V",fontsize=fontsize); axs[0,2].set_ylabel("UVW2",fontsize=fontsize);
axs[0,3].set_xlabel("UVM2 - UVW1",fontsize=fontsize); axs[0,3].set_ylabel("UVM2",fontsize=fontsize);
axs[1,0].set_xlabel("UVW1 - V",fontsize=fontsize); axs[1,0].set_ylabel("V",fontsize=fontsize);
axs[1,1].set_xlabel("UVM2 - V",fontsize=fontsize); axs[1,1].set_ylabel("V",fontsize=fontsize);
axs[1,2].set_xlabel("UVW2 - V",fontsize=fontsize); axs[1,2].set_ylabel("V",fontsize=fontsize);
axs[1,3].set_xlabel("UVM2 - UVW1",fontsize=fontsize); axs[1,3].set_ylabel("UVW1",fontsize=fontsize);
[ax.set_ylim(19,11) for ax in axs[0]]
[ax.set_xlim(-7,4) for ax in axs[0]]
[ax.set_ylim(20,12) for ax in axs[1]]
[ax.set_xlim(-7,4) for ax in axs[1]]
plt.savefig(f"{path}/{observation_id}/{observation_id}_1_1_photometry.png")
#plt.close()
# Step 10 Plot Coordinates
plt.figure(figsize=(10,10))
d = meta['UVM2'].data
x,y = meta['UVM2'].pixel_positions
plt.imshow(d,vmin=vmin(d),vmax=vmax(d),origin="lower")
plt.scatter(x,y,s=5,c='red')
plt.xlim(500,600)
plt.ylim(500,600)
plt.savefig(f"{path}/{observation_id}/{observation_id}_1_1coordinates.png")
print("Total Time for ObsID: ",observation_id,' (1_1) ', (time.time()-t1)/60)
```
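The plotting cells above call `vmin(...)` and `vmax(...)`, which are defined in an earlier step of this notebook that is not shown here. Purely as an illustration (an assumption, not the notebook's actual definition), percentile clipping is a common way to implement such display-scaling helpers:
```
import numpy as np

# Hypothetical stand-ins for the notebook's vmin/vmax display-scaling helpers:
# clip the color stretch to the central 98% of finite pixel values.
def vmin(img, pct=1):
    return np.nanpercentile(img, pct)

def vmax(img, pct=99):
    return np.nanpercentile(img, pct)
```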
---
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import sys
sys.path.append('..')  # project root, so that the src package can be imported
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.cluster import SpectralBiclustering, SpectralCoclustering  # older scikit-learn exposed these under sklearn.cluster.bicluster
from sklearn.manifold import TSNE
from tqdm import tqdm
from src.data.load_data import get_small_dataset_content
from src.features.normalize import select_variance_features, scale_df
df = get_small_dataset_content()
df.head()
# Feature Selection
X = select_variance_features(df, threshold=15)
X = scale_df(X)
X.head()
plt.figure(figsize=(20,10))
im = plt.imshow(X, aspect='auto')
plt.xticks([0, 1, 2, 3])
plt.ylabel("Samples")
plt.xlabel("Gene Expression Data")
plt.colorbar(im)
plt.show()
# Plot the scaled features using t-SNE
X_tsne = TSNE(n_components=2, perplexity=40).fit_transform(X)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1])
plt.show()
# Clustering
# Compute DBSCAN
epsilons = []
scores = []
n_clusters = []
PARAMETER_FACTOR = 100
# Find the best epsilon for clustering based on the silhouette coefficient
for eps in tqdm(range(1, 10 * PARAMETER_FACTOR, 1)):
db = DBSCAN(eps=eps/PARAMETER_FACTOR, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Evaluation
epsilons.append(eps/PARAMETER_FACTOR)
n_clusters.append(n_clusters_)
score = 0
if n_clusters_ > 1:
score = metrics.silhouette_score(X, labels)
scores.append(score)
plt.plot(epsilons, n_clusters, label="n_clusters")
plt.plot(epsilons, scores, label="score")
plt.legend()
plt.show()
max_index = scores.index(max(scores))
# scores[i] corresponds to epsilons[i], so look the best epsilon up directly
epsilons[max_index], scores[max_index], n_clusters[max_index]
db = DBSCAN(eps=epsilons[max_index], min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=[list(set(labels)).index(x) for x in labels])
plt.show()
# Bi-Clustering using Spectral Co-Clustering and Spectral Bi-Clustering
models = [SpectralCoclustering(n_clusters=2), SpectralBiclustering(n_clusters=2)]
for model in models:
model.fit(X)
fit_data = X.values[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, extent=(-3, 3, 3, -3))
plt.title("Rearranged to show biclusters using " + str(model.__class__))
plt.show()
```
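Note that the cluster count above deliberately ignores noise (label `-1`). A quick follow-up check, sketched below using the `labels` array from the final DBSCAN fit, is to report how much of the data was left unclustered:
```
import numpy as np

# Fraction of samples DBSCAN marked as noise with the chosen epsilon.
noise_fraction = np.mean(np.asarray(labels) == -1)
print("Noise fraction: {:.1%}".format(noise_fraction))
```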
---
### ENVELOPE SPECTRUM - HEALTHY (Normal Baseline Data)
```
import scipy.io as sio
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lee_dataset_CWRU
from lee_dataset_CWRU import *
import envelope_spectrum
from envelope_spectrum import *
faultRates = [3.585, 5.415, 1] #[outer, inner, shaft]
Fs = 12000
DE_H1, FE_H1, t_DE_H1, t_FE_H1, RPM_H1, samples_s_DE_H1, samples_s_FE_H1 = lee_dataset('../DataCWRU/97.mat')
DE_H2, FE_H2, t_DE_H2, t_FE_H2, RPM_H2, samples_s_DE_H2, samples_s_FE_H2 = lee_dataset('../DataCWRU/98.mat')
RPM_H2 = 1778
DE_H3, FE_H3, t_DE_H3, t_FE_H3, RPM_H3, samples_s_DE_H3, samples_s_FE_H3 = lee_dataset('../DataCWRU/99.mat')
RPM_H3 = 1750
DE_H4, FE_H4, t_DE_H4, t_FE_H4, RPM_H4, samples_s_DE_H4, samples_s_FE_H4 = lee_dataset('../DataCWRU/100.mat')
fr_H1 = RPM_H1 / 60
BPFI_H1 = 5.4152 * fr_H1
BPFO_H1 = 3.5848 * fr_H1
fr_H2 = RPM_H2 / 60
BPFI_H2 = 5.4152 * fr_H2
BPFO_H2 = 3.5848 * fr_H2
fr_H3 = RPM_H3 / 60
BPFI_H3 = 5.4152 * fr_H3
BPFO_H3 = 3.5848 * fr_H3
fr_H4 = RPM_H4 / 60
BPFI_H4 = 5.4152 * fr_H4
BPFO_H4 = 3.5848 * fr_H4
fSpec_H1, xSpec_H1 = envelope_spectrum2(DE_H1, Fs)
fSpec_H2, xSpec_H2 = envelope_spectrum2(DE_H2, Fs)
fSpec_H3, xSpec_H3 = envelope_spectrum2(DE_H3, Fs)
fSpec_H4, xSpec_H4 = envelope_spectrum2(DE_H4, Fs)
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
fig.set_size_inches(14, 10)
ax1.plot(fSpec_H1, xSpec_H1, label = 'Env. spectrum')
ax1.axvline(x = fr_H1, color = 'k', linestyle = '--', lw = 1.5, label = 'fr', alpha = 0.6)
ax1.axvline(x = BPFI_H1, color = 'r', linestyle = '--', lw = 1.5, label = 'BPFI', alpha = 0.6)
ax1.axvline(x = BPFO_H1, color = 'g', linestyle = '--', lw = 1.5, label = 'BPFO', alpha = 0.6)
ax1.set_xlim(0,200)
ax1.legend()
ax1.set_xlabel('Frequency')
ax1.set_ylabel('Env. spectrum')
ax1.set_title("Normal Baseline Data, 1797 RPM")
ax2.plot(fSpec_H2, xSpec_H2, label = 'Env. spectrum')
ax2.axvline(x = fr_H2, color = 'k', linestyle = '--', lw = 1.5, label = 'fr', alpha = 0.6)
ax2.axvline(x = BPFI_H2, color = 'r', linestyle = '--', lw = 1.5, label = 'BPFI', alpha = 0.6)
ax2.axvline(x = BPFO_H2, color = 'g', linestyle = '--', lw = 1.5, label = 'BPFO', alpha = 0.6)
ax2.set_xlim(0,200)
ax2.legend()
ax2.set_xlabel('Frequency')
ax2.set_ylabel('Env. spectrum')
ax2.set_title("Normal Baseline Data, 1772 RPM")
ax3.plot(fSpec_H3, xSpec_H3, label = 'Env. spectrum')
ax3.axvline(x = fr_H3, color = 'k', linestyle = '--', lw = 1.5, label = 'fr', alpha = 0.6)
ax3.axvline(x = BPFI_H3, color = 'r', linestyle = '--', lw = 1.5, label = 'BPFI', alpha = 0.6)
ax3.axvline(x = BPFO_H3, color = 'g', linestyle = '--', lw = 1.5, label = 'BPFO', alpha = 0.6)
ax3.set_xlim(0,200)
ax3.legend()
ax3.set_xlabel('Frequency')
ax3.set_ylabel('Env. spectrum')
ax3.set_title("Normal Baseline Data, 1750 RPM")
ax4.plot(fSpec_H4, xSpec_H4, label = 'Env. spectrum')
ax4.axvline(x = fr_H4, color = 'k', linestyle = '--', lw = 1.5, label = 'fr', alpha = 0.6)
ax4.axvline(x = BPFI_H4, color = 'r', linestyle = '--', lw = 1.5, label = 'BPFI', alpha = 0.6)
ax4.axvline(x = BPFO_H4, color = 'g', linestyle = '--', lw = 1.5, label = 'BPFO', alpha = 0.6)
ax4.set_xlim(0,200)
ax4.legend(loc = 1)
ax4.set_xlabel('Frequency')
ax4.set_ylabel('Env. spectrum')
ax4.set_title("Normal Baseline Data, 1730 RPM")
clasificacion_sanos = pd.DataFrame({'Señal': ['97.mat', '98.mat', '99.mat', '100.mat'],
'Estado': ['Sano'] * 4,
'Predicción': [clasificacion_envelope(fSpec_H1, xSpec_H1, fr_H1, BPFO_H1, BPFI_H1),
clasificacion_envelope(fSpec_H2, xSpec_H2, fr_H2, BPFO_H2, BPFI_H2),
clasificacion_envelope(fSpec_H3, xSpec_H3, fr_H3, BPFO_H3, BPFI_H3),
clasificacion_envelope(fSpec_H4, xSpec_H4, fr_H4, BPFO_H4, BPFI_H4)]})
clasificacion_sanos
```
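All of the characteristic frequencies above are simple multiples of the shaft rotation frequency fr = RPM / 60. A small helper that keeps this relationship in one place (a sketch using the same multipliers as the cells above, not part of the original notebook):
```
# Hypothetical helper: shaft and bearing-fault frequencies from RPM,
# using the same BPFI/BPFO multipliers as above.
def fault_frequencies(rpm, bpfi_factor=5.4152, bpfo_factor=3.5848):
    fr = rpm / 60.0   # shaft rotation frequency in Hz
    return {"fr": fr, "BPFI": bpfi_factor * fr, "BPFO": bpfo_factor * fr}

print(fault_frequencies(1797))   # e.g. the nominal speed of the 97.mat case
```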
---
<a id="TSM_Demo_top"></a>
# TSM Demo
<hr>
# Notebook Summary
TSM stands for "Total Suspended Matter", also called TSS ("Total Suspended Solids"). It is the dry weight of the particles suspended (not dissolved) in a body of water and is a common proxy for water quality.
<hr>
# Index
* [Import Dependencies and Connect to the Data Cube](#TSM_Demo_import)
* [Choose Platforms and Products](#TSM_Demo_plat_prod)
* [Get the Extents of the Cube](#TSM_Demo_extents)
* [Define the Extents of the Analysis](#TSM_Demo_define_extents)
* [Load Data from the Data Cube](#TSM_Demo_load_data)
* Mask out clouds and create a median composite
* Show false-color RGB image of the composite
* [Obtain TSM](#TSM_Demo_obtain_tsm)
* Mask out everything but water and calculate TSM
* Show the water composite
* Show mean TSM
* Show maximum TSM
* Show minimum TSM
## <span id="TSM_Demo_import">Import Dependencies and Connect to the Data Cube [▴](#TSM_Demo_top)</span>
```
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
import warnings
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import xarray as xr
from utils.data_cube_utilities.clean_mask import landsat_clean_mask_full
from utils.data_cube_utilities.dc_load import get_product_extents
from utils.data_cube_utilities.dc_time import dt_to_str
from utils.data_cube_utilities.dc_display_map import display_map
from utils.data_cube_utilities.dc_rgb import rgb
from utils.data_cube_utilities.plotter_utils import figure_ratio
from utils.data_cube_utilities.dc_water_quality import tsm
from utils.data_cube_utilities.dc_water_classifier import wofs_classify
from datacube.utils.aws import configure_s3_access
configure_s3_access(requester_pays=True)
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi()
dc = api.dc
```
## <span id="TSM_Demo_plat_prod">Choose Platforms and Products [▴](#TSM_Demo_top)</span>
**List available products for each platform**
```
# Get available products
products_info = dc.list_products()
# List Landsat 7 products
print("Landsat 7 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_7"]
# List Landsat 8 products
print("Landsat 8 Products:")
products_info[["platform", "name"]][products_info.platform == "LANDSAT_8"]
```
**Choose products**
```
# Select a product and platform
# Examples: ghana, kenya, tanzania, sierra_leone, senegal
product = 'ls8_usgs_sr_scene'
platform = 'LANDSAT_8'
collection = 'c1'
level = 'l2'
```
## <span id="TSM_Demo_extents">Get the Extents of the Cube [▴](#TSM_Demo_top)</span>
```
full_lat, full_lon, min_max_dates = get_product_extents(api, platform, product)
# Print the extents of the data.
print("Latitude Extents:", full_lat)
print("Longitude Extents:", full_lon)
print("Time Extents:", list(map(dt_to_str, min_max_dates)))
```
**Visualize the available area**
```
display_map(full_lat, full_lon)
```
## <span id="TSM_Demo_define_extents">Define the Extents of the Analysis [▴](#TSM_Demo_top)</span>
```
# Select an analysis region (Lat-Lon) within the extents listed above.
# Select a time period (Min-Max) within the extents listed above (Year-Month-Day)
# This region and time period will be used for the cloud assessment
# Weija Reservoir, Ghana
# lat = (5.5487, 5.6203)
# lon = (-0.4028, -0.3326)
# Lake Manyara, Tanzania
lat = (-3.8505, -3.3886)
lon = (35.7184, 35.9271)
# Delta du Saloum - Senegal
# lat = (13.65, 13.7550)
# lon = (-16.70, -16.65)
# Time Period
time_extents = ('2018-01-01', '2018-12-31')
```
**Visualize the selected area**
```
display_map(lat, lon)
```
## <span id="TSM_Demo_load_data">Load Data from the Data Cube [▴](#TSM_Demo_top)</span>
### Mask out clouds and create a median mosaic
```
load_params = \
dict(latitude = lat,
longitude = lon,
platform = platform,
time = time_extents,
product = product,
group_by='solar_day',
dask_chunks={'latitude':1000,'longitude':1000,
'time':10})
landsat_ds = \
dc.load(**load_params,
measurements = ['red', 'green', 'blue',
'nir', 'swir1', 'swir2', 'pixel_qa'])
max_px_x_y = 1000 # Max resolution in either x or y dimension.
lat_stride = int(max(1, np.ceil(len(landsat_ds.latitude)/max_px_x_y)))
lon_stride = int(max(1, np.ceil(len(landsat_ds.longitude)/max_px_x_y)))
landsat_ds = landsat_ds.isel(latitude=slice(0, len(landsat_ds.latitude), lat_stride),
longitude=slice(0, len(landsat_ds.longitude), lon_stride))
```
**Mask unclean data**
```
clean_mask = landsat_clean_mask_full(dc, landsat_ds, product=product, platform=platform,
collection=collection, level=level)
landsat_ds = landsat_ds.where(clean_mask).persist()
```
**Create a median composite**
```
median_composite = landsat_ds.median('time').persist()
```
### Show false-color RGB image of the land and water composite
```
# RGB image options
# Standard RGB = 321 = Red, Green, Blue
# False Color = 543 = SWIR1, NIR, Red
# False Color (Landsat Mosaic) = 742 = SWIR2, NIR, Green
std_figsize = figure_ratio(median_composite, fixed_width=8)
fig = plt.figure(figsize=std_figsize)
median_composite[['swir2', 'nir', 'green']].to_array()\
.plot.imshow(vmin=0, vmax=4000)
plt.show()
```
## <span id="TSM_Demo_obtain_tsm">Obtain TSM [▴](#TSM_Demo_top)</span>
### Mask out everything but water and calculate TSM
```
# Ignore innocuous warnings about division by zero and NaNs.
with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    water = wofs_classify(landsat_ds, no_data=0.0).wofs
    water_mask = water.astype(bool)  # np.bool is removed in recent NumPy releases
    water_composite = water.max('time', skipna=True).persist()
    tsm_da = tsm(landsat_ds[['red', 'green']], water_mask).tsm.persist()
    tsm_min = tsm_da.min('time', skipna=True).persist()
    tsm_mean = tsm_da.mean('time', skipna=True).persist()
    tsm_max = tsm_da.max('time', skipna=True).persist()
    del tsm_da
```
### Show the water composite
```
fig = plt.figure(figsize=std_figsize)
water_composite.plot.imshow()
plt.show()
```
### Show mean TSM
> Note that the color scale is different for these images. The color scale for each is determined by its distribution of values, so examine the color scales carefully to determine the estimated mass of suspended matter for a given color in each one.
```
plt.figure(figsize=std_figsize)
mean_tsm_plot = tsm_mean.plot.imshow(cmap = "hot", robust=True)
plt.title('Mean Total Suspended Matter (TSM)')
plt.xlabel('Longitude (degrees east)')
plt.ylabel('Latitude (degrees north)')
mean_tsm_plot.colorbar.set_label('g/L')
plt.show()
```
### Show maximum TSM
```
plt.figure(figsize=std_figsize)
max_tsm_plot = tsm_max.plot.imshow(cmap = "hot", robust=True)
plt.title('Maximum Total Suspended Matter (TSM)')
plt.xlabel('Longitude (degrees east)')
plt.ylabel('Latitude (degrees north)')
max_tsm_plot.colorbar.set_label('g/L')
plt.show()
```
### Show minimum TSM
```
plt.figure(figsize=std_figsize)
minimum_tsm_plot = tsm_min.plot.imshow(cmap = "hot", robust=True)
plt.title('Minimum Total Suspended Matter (TSM)')
plt.xlabel('Longitude (degrees east)')
plt.ylabel('Latitude (degrees north)')
minimum_tsm_plot.colorbar.set_label('g/L')
plt.show()
```
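As noted above, `robust=True` gives each panel its own color stretch. If directly comparable colors are preferred, one option (a sketch rather than part of the original notebook, reusing the `tsm_mean`, `tsm_max` and `tsm_min` arrays computed above) is to pass a shared `vmin`/`vmax`:
```
# Plot the three TSM composites on one shared color scale so colors are comparable.
shared_vmin = float(tsm_min.min())
shared_vmax = float(tsm_max.max())
fig, axes = plt.subplots(1, 3, figsize=(24, 8))
for ax, da, name in zip(axes, [tsm_mean, tsm_max, tsm_min], ['Mean', 'Maximum', 'Minimum']):
    da.plot.imshow(ax=ax, cmap='hot', vmin=shared_vmin, vmax=shared_vmax)
    ax.set_title('{} TSM (g/L)'.format(name))
plt.tight_layout()
plt.show()
```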
---
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
seed=42
np.random.seed(seed)
X,y=load_iris(return_X_y=True)
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=42,shuffle=True,stratify=y)
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler(feature_range=(0,1))
scaler.fit(x_train) #learn the scaling parameters from the training set only
x_train=scaler.transform(x_train)
x_test=scaler.transform(x_test)
#base estimator for the ensemble (the main model)
model_tree=DecisionTreeClassifier()
#number of estimators
n_estimators=600
#start with the bagging method -> BaggingClassifier
bagging=BaggingClassifier(base_estimator=model_tree,
n_estimators=n_estimators,
random_state=seed
)
bagging.fit(x_train,y_train);
bagging.score(x_train,y_train)
bagging.score(x_test,y_test)
#cross-validate on the training data
from sklearn.model_selection import cross_validate
result=cross_validate(bagging,x_train,y_train,scoring=["accuracy"],return_train_score=True,cv=10)
result.keys()
result["test_accuracy"].mean(),result["test_accuracy"].std()
result["train_accuracy"].mean(),result["train_accuracy"].std()
from sklearn.metrics import classification_report
y_pred=bagging.predict(x_test)
report=classification_report(y_test,y_pred)
print(report)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
forest=RandomForestClassifier()
param_grid={
"max_depth":range(1,21),
"n_estimators":100*np.arange(1,11)
}
cv=StratifiedKFold(n_splits=10,random_state=seed,shuffle=True)
grid=GridSearchCV(forest,
param_grid=param_grid,
cv=cv,
scoring="accuracy",n_jobs=-1)
grid.fit(x_train,y_train);
grid.best_score_
model=grid.best_estimator_
y_pred=model.predict(x_test)
report=classification_report(y_test,y_pred)
print(report)
parameters=grid.best_params_
#so the best model is `model`
import joblib
joblib.dump(model,"classifier_iris.pkl")
#build a data pipeline to automate the preprocessing
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=seed,shuffle=True,stratify=y)
#the pipeline handles the numerical (and, if present, categorical) features
#all of the iris features are numerical
numerical_features=slice(0,4) #select all four feature columns by position (slice() with no arguments raises TypeError)
numerical_transformer=Pipeline([
("impute",SimpleImputer(strategy="mean")),
("scaler",MinMaxScaler(feature_range=(0,1)))
])
transformer=ColumnTransformer(
[("numerical",numerical_transformer,numerical_features)],
remainder="drop"
)
#now combine the pipeline with the model
pipeline_model=Pipeline([("transformer",transformer),
("modelo_forest",RandomForestClassifier(**parameters))
])
pipeline_model.fit(x_train,y_train);
pipeline_model.predict_proba([[0.1,0.2,0.3,0.4]])
y_pred=pipeline_model.predict(x_test) #keep y_test as the ground truth; store the predictions in y_pred
report=classification_report(y_test,y_pred)
print(report)
#display the HTML representation of the pipeline
from sklearn import set_config
set_config(display="diagram")
pipeline_model
import pandas as pd
features_content=pd.DataFrame([[0.1,0.2,0.3,0.4],
[2.3,5.6,7.8,0.9],
[0.4,5.6,None,9.2],
[0.1,0.2,0.3,0.4]])
features_content.head()
pipeline_model.predict(features_content)
features_content["predict label"]=pipeline_model.predict(features_content)
features_content.head()
pipeline_model.predict(features_content.iloc[:,:4])
joblib.dump(pipeline_model,"pipeline_model_classification_iris.pkl")
canal=joblib.load("pipeline_model_classification_iris.pkl")
canal.predict(features_content.iloc[:,:4])
```
## Voting Classifier model
* We experiment with an ensemble whose output is the class predicted by the largest share of the trained base models: the individual predictions are put to a vote and the winning class is the output.
```
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
list_models=[("RandomForest",RandomForestClassifier(**parameters)),
("SVC",SVC(probability=True)),
("Logistic",LogisticRegression(max_iter=200)),
("Kneighbors",KNeighborsClassifier())
]
from sklearn.ensemble import VotingClassifier
voting=VotingClassifier(estimators=list_models,voting="soft")
voting.fit(transformer.fit_transform(x_train),y_train);
voting.score(transformer.transform(x_train),y_train)
voting.score(transformer.transform(x_test),y_test)
y_pred=voting.predict(transformer.transform(x_test))
print(classification_report(y_test,y_pred))
pipeline_model.score(x_train,y_train)
pipeline_model.score(x_test,y_test)
```
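The cell above uses `voting="soft"`, which averages the predicted class probabilities. As a minimal comparison sketch (an addition, not part of the original notebook; it reuses `list_models`, the fitted `transformer`, and the train/test splits defined above), hard voting instead takes a plain majority vote over the predicted labels:
```
from sklearn.ensemble import VotingClassifier

# Hard voting: each base model predicts a label and the most common label wins.
hard_voting = VotingClassifier(estimators=list_models, voting="hard")
hard_voting.fit(transformer.transform(x_train), y_train)
print("hard-voting test accuracy:", hard_voting.score(transformer.transform(x_test), y_test))
```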
## Ensemble model: Stacking
```
from sklearn.ensemble import StackingClassifier
base_models=list_models.copy()
final_model=LogisticRegression(max_iter=200)
cv=StratifiedKFold(n_splits=10)
stacking=StackingClassifier(estimators=base_models,final_estimator=final_model,cv=cv)
stacking.fit(x_train,y_train);
stacking.score(x_train,y_train)
stacking.score(x_test,y_test)
```
## AdaBoostClassifier
* A meta-model that combines weak classifiers to build a stronger one.
* Its strategy is to look at where incorrect predictions were made and then focus on those cases to build a stronger model.
```
from sklearn.ensemble import AdaBoostClassifier
ada=AdaBoostClassifier(n_estimators=100,random_state=seed)
ada.fit(x_train,y_train)
ada.score(x_train,y_train)
ada.score(x_test,y_test)
```
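To see this error-focusing behaviour, the following sketch (an addition; it assumes `ada`, `x_test`, and `y_test` from the cells above are still in memory) tracks the test accuracy as weak learners are added one by one:
```
import matplotlib.pyplot as plt

# staged_score yields the ensemble's accuracy after each additional weak learner.
test_curve = list(ada.staged_score(x_test, y_test))
plt.plot(range(1, len(test_curve) + 1), test_curve)
plt.xlabel("number of weak learners")
plt.ylabel("test accuracy")
plt.show()
```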
# CMA score
Mathieu Bourdenx - Oct. 2020
```
import numpy as np
import pandas as pd
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
plt.rc('xtick', labelsize=14)
plt.rc('ytick', labelsize=14)
import scanpy as sc
# Load cluster ID (from Seurat)
clusters = pd.read_excel('../../cluster_identity.xlsx')
```
## Load datasets (very very long - 1hr+)
```
#Load full dataset
adata = sc.read_loom('../../cx_rnaAssay.loom')
# Load small dataset containing cell metadata
small_adata = sc.read_loom('./cx_integratedAssay.loom')
# Copy metadata
adata.obs = small_adata.obs
# Create a barcode dataframe
barcode = small_adata.obs
```
## Preprocessing
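As a sketch of what the two calls below do (my notation, not from the original text): each cell's counts are rescaled to counts per million and then log-transformed,

$$\tilde{x}_{g,i} = \log\!\left(1 + 10^{6}\,\frac{c_{g,i}}{\sum_{g'} c_{g',i}}\right),$$

where $c_{g,i}$ is the raw count of gene $g$ in cell $i$.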
```
# Normalize counts as CPM
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e6)
# Log transform data
sc.pp.log1p(adata)
```
## CMA score calculation
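Reading the loops below, the per-cell score is a weighted, signed average of z-scored expression over the genes in the activation model; as a sketch of what the code computes (my notation, not from the original text):

$$\mathrm{score}_i = \frac{\sum_{g} z_{g,i}\, d_g\, w_g}{\sum_{g} w_g},$$

where $z_{g,i}$ is the z-scored log-CPM expression of gene $g$ in cell $i$, and $d_g$ (Direction) and $w_g$ (Weight) come from the model matrix.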
```
# Load matrix file with weight and direction
model_matrix = pd.read_excel('./activation_model.xlsx')
cma_network = adata[:, model_matrix['Gene name Ms']]
cma_data_zs = cma_network.copy().X.todense().T
for i in tqdm(np.arange(cma_data_zs.shape[0])):
µ = np.mean(cma_data_zs[i, :])
sd = np.std(cma_data_zs[i, :])
cma_data_zs[i, :] = (cma_data_zs[i, :] - µ)/sd
for i,j in tqdm(enumerate(barcode.index)):
cell_matrix = model_matrix.copy()
for g in cell_matrix.index:
cell_matrix.loc[g, 'gene_count'] = cma_data_zs[g, i]
cell_matrix['gene_score'] = cell_matrix['gene_count'] * cell_matrix['Direction'] * cell_matrix['Weight']
score = cell_matrix['gene_score'].sum()/np.sum(cell_matrix['Weight'])
barcode.loc[j, 'score'] = score
for i in tqdm(barcode.index):
barcode.loc[i, 'broad.cell.type'] = clusters.loc[int(barcode.loc[i, 'seurat_clusters']), 'broad.cell.type']
plt.figure(figsize=(12, 6))
sns.barplot(data=barcode, x="broad.cell.type", y='score', hue='Condition')
# Calculation of net score to WT 2m
for maj_cell in tqdm(np.unique(barcode['broad.cell.type'])):
µ = np.mean(barcode[barcode['broad.cell.type'] == maj_cell][barcode['Condition'] == 'CX_WT_2m']['score'])
for cell_index in barcode[barcode['broad.cell.type'] == maj_cell].index:
barcode.loc[cell_index, 'net_score_group'] = barcode.loc[cell_index, 'score'] - µ
# Create a new age category to align 6 and 8m
for i in tqdm(barcode.index):
if barcode.loc[i, 'Age'] == '2m':
barcode.loc[i, 'new_age'] = '2m'
elif barcode.loc[i, 'Age'] == '6m':
barcode.loc[i, 'new_age'] = '8m'
elif barcode.loc[i, 'Age'] == '8m':
barcode.loc[i, 'new_age'] = '8m'
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "excit."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/excitatory_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "inhib."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/inhib_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "astro."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/astro_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "microglia"], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/microglia_net.png', dpi=300)
plt.show()
fig, ax = plt.subplots(figsize=(3,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == "oligo."], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO"])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
#plt.legend(title='Cell type', loc='upper left')
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots/oligo_net.png', dpi=300)
plt.show()
```
## Plots with 3 groups
```
def make_plots(cellpop):
fig, ax = plt.subplots(figsize=(4,3), constrained_layout=True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
sns.pointplot(x='new_age', y='net_score_group', data=barcode[barcode['broad.cell.type'] == cellpop], hue='Genotype', order=['2m', '8m'] , hue_order=['WT', "L2AKO", 'PD'])
plt.axhline(y=0, linestyle='dashed', linewidth=2, color='gray', zorder=1)
plt.legend(bbox_to_anchor=(1,1))
plt.ylabel('CMA score \n(net difference)', fontdict={'size': 14})
plt.xlabel('Age', fontdict={'size': 14})
plt.yticks([-0.06, 0, 0.06])
plt.ylim(-0.06, 0.08)
plt.xlim(-.2, 1.2)
plt.savefig('./plots_3groups/{}_netscore.png'.format(cellpop), dpi=300)
plt.show()
cell_to_plot = ['excit.', "oligo.", 'astro.', 'microglia', 'inhib.', 'OPCs']
for i in cell_to_plot:
make_plots(cellpop=i)
```
# CMA component heatmap
#### *Trial function on excitatory neurons*
```
neuron_matrix = np.zeros((18, 6))
wt_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'WT'][barcode['new_age'] == '2m'].index)
wt_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'WT'][barcode['new_age'] == '8m'].index)
ko_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '2m'].index)
ko_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '8m'].index)
pd_2m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'PD'][barcode['new_age'] == '2m'].index)
pd_8m_index = list(barcode[barcode['broad.cell.type'] == 'excit.'][barcode['Genotype'] == 'PD'][barcode['new_age'] == '8m'].index)
for rank in tqdm(np.arange(neuron_matrix.shape[0])):
neuron_matrix[rank, 0] = np.mean(cma_network[wt_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 1] = np.mean(cma_network[wt_8m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 2] = np.mean(cma_network[ko_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 3] = np.mean(cma_network[ko_8m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 4] = np.mean(cma_network[pd_2m_index, :].X.todense()[:, rank])
neuron_matrix[rank, 5] = np.mean(cma_network[pd_8m_index, :].X.todense()[:, rank])
neuron_matrix_zs = neuron_matrix.copy()
for i in np.arange(neuron_matrix_zs.shape[0]):
µ = np.mean(neuron_matrix_zs[i, :])
sd = np.std(neuron_matrix_zs[i, :])
neuron_matrix_zs[i, :] = (neuron_matrix_zs[i, :] - µ) / sd
plt.figure(figsize=(6, 6))
plt.imshow(neuron_matrix_zs, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(6), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m', 'PD 2m', 'PD 8m'], rotation='vertical')
plt.savefig('./heatmaps_3groups/ex_neurons.png', dpi=300)
plt.savefig('./heatmaps_3groups/ex_neurons.pdf')
plt.show()
neuron_matrix_2group = neuron_matrix[:, :-2]
for i in np.arange(neuron_matrix_2group.shape[0]):
µ = np.mean(neuron_matrix_2group[i, :])
sd = np.std(neuron_matrix_2group[i, :])
neuron_matrix_2group[i, :] = (neuron_matrix_2group[i, :] - µ) / sd
plt.figure(figsize=(6, 6))
plt.imshow(neuron_matrix_2group, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(4), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m'], rotation='vertical')
plt.savefig('./heatmaps/ex_neurons.png', dpi=300)
plt.savefig('./heatmaps/ex_neurons.pdf')
plt.show()
```
#### *All cells function*
```
def make_heatmaps(cellpop):
# Prepare empty matrix
matrix = np.zeros((18, 6))
#Find cell indices for each condition
wt_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'WT'][barcode['new_age'] == '2m'].index)
wt_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'WT'][barcode['new_age'] == '8m'].index)
ko_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '2m'].index)
ko_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'L2AKO'][barcode['new_age'] == '8m'].index)
pd_2m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'PD'][barcode['new_age'] == '2m'].index)
pd_8m_index = list(barcode[barcode['broad.cell.type'] == cellpop][barcode['Genotype'] == 'PD'][barcode['new_age'] == '8m'].index)
#Calculate mean per gene for every condition
for rank in tqdm(np.arange(matrix.shape[0])):
matrix[rank, 0] = np.mean(cma_network[wt_2m_index, :].X.todense()[:, rank])
matrix[rank, 1] = np.mean(cma_network[wt_8m_index, :].X.todense()[:, rank])
matrix[rank, 2] = np.mean(cma_network[ko_2m_index, :].X.todense()[:, rank])
matrix[rank, 3] = np.mean(cma_network[ko_8m_index, :].X.todense()[:, rank])
matrix[rank, 4] = np.mean(cma_network[pd_2m_index, :].X.todense()[:, rank])
matrix[rank, 5] = np.mean(cma_network[pd_8m_index, :].X.todense()[:, rank])
#Perform z-scoring on each row
matrix_zs = matrix.copy()
for i in np.arange(matrix_zs.shape[0]):
µ = np.mean(matrix_zs[i, :])
sd = np.std(matrix_zs[i, :])
matrix_zs[i, :] = (matrix_zs[i, :] - µ) / sd
#Plot heatmap including all conditions
plt.figure(figsize=(6, 6))
plt.imshow(matrix_zs, cmap='viridis', vmin=-1, vmax=1)
plt.colorbar(shrink=.5, label='Gene z score')
plt.yticks(np.arange(18), model_matrix['Gene name'])
plt.ylim(17.5, -0.5)
plt.xticks(np.arange(6), ['WT 2m', 'WT 8m', 'KO 2m', 'KO 8m', 'PD 2m', 'PD 8m'], rotation='vertical')
plt.savefig('./heatmaps_3groups/{}.png'.format(cellpop), dpi=300)
plt.savefig('./heatmaps_3groups/{}.pdf'.format(cellpop))
plt.show()
#Perform z-scoring on only 2 groups
matrix_2group = matrix[:, :-2]
for i in np.arange(matrix_2group.shape[0]):
µ = np.mean(matrix_2group[i, :])
sd = np.std(matrix_2group[i, :])
matrix_2group[i, :] = (matrix_2group[i, :] - µ) / sd
cell_to_plot = ['excit.', "oligo.", 'astro.', 'microglia', 'inhib.', 'OPCs']
for i in cell_to_plot:
make_heatmaps(cellpop=i)
```
# Heuristics for signals with sparse first and second differences
We can estimate piecewise constant and piecewise linear functions by constructing cost functions that penalize the cardinality of the first- and second-order differences of a signal, respectively. The cardinality measure (sometimes called the $\ell_0$ norm) is simply the number of non-zero values. The $\ell_1$ norm is a common convex relaxation of the cardinality measure.
Here we demonstrate two signal classes based on the $\ell_1$ heuristic: `SparseFirstDiffConvex` and `SparseSecondDiffConvex`.
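As a rough sketch of the underlying optimization (the exact weighting and parametrization inside the `osd` components may differ), both heuristics solve a problem of the form

$$\operatorname*{minimize}_{x}\;\; \lVert y - x \rVert_2^2 + \lambda \lVert D_k x \rVert_1,$$

where $y$ is the observed signal, $D_1$ is the first-difference operator (giving piecewise-constant estimates), $D_2$ is the second-difference operator (giving piecewise-linear estimates), and $\lambda$ trades data fit against the sparsity of the differences.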
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
from scipy.optimize import minimize_scalar, minimize
from time import time
import seaborn as sns
sns.set_style('darkgrid')
import sys
sys.path.append('..')
from osd import Problem
from osd.components import GaussNoise, SparseFirstDiffConvex, SparseSecondDiffConvex
from osd.utilities import progress
SOLVER = 'OSQP'
```
## Example 1: Noisy Square Wave
```
np.random.seed(42)
t = np.linspace(0, 1000, 3000)
signal1 = signal.square(2 * np.pi * t * 1 / (450.))
y = signal1 + 0.25 * np.random.randn(len(signal1))
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal')
plt.plot(t, y, alpha=0.5, label='observed signal')
plt.legend()
plt.show()
```
### Sparse First-Order Difference Heuristic
```
problem = Problem(data=y, components=[GaussNoise, SparseFirstDiffConvex])
problem.optimize_weights(solver=SOLVER)
problem.weights.value
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal', ls='--')
plt.plot(t, y, alpha=0.1, linewidth=1, marker='.', label='observed signal')
plt.plot(t, problem.estimates[1], label='estimated signal')
plt.legend()
plt.show()
problem.holdout_validation(solver=SOLVER, seed=42)
```
### Sparse Second-Order Difference Heuristic
```
problem = Problem(data=y, components=[GaussNoise, SparseSecondDiffConvex])
problem.optimize_weights(solver=SOLVER)
problem.weights.value
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal', ls='--')
plt.plot(t, y, alpha=0.1, linewidth=1, marker='.', label='observed signal')
plt.plot(t, problem.estimates[1], label='estimated signal')
plt.legend()
plt.show()
problem.holdout_validation(solver=SOLVER, seed=42)
```
## Example 2: Noisy Triangle Wave
```
np.random.seed(42)
t = np.linspace(0, 1000, 3000)
signal1 = np.abs(signal.sawtooth(2 * np.pi * t * 1 / (500.)))
y = signal1 + 0.25 * np.random.randn(len(signal1))
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal')
plt.plot(t, y, alpha=0.5, label='observed signal')
plt.legend()
plt.show()
```
### Sparse First-Order Difference Heuristic
```
problem = Problem(data=y, components=[GaussNoise, SparseFirstDiffConvex])
problem.weights.value = [1, 1e2]
problem.optimize_weights(solver=SOLVER)
problem.weights.value
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal', ls='--')
plt.plot(t, y, alpha=0.1, linewidth=1, marker='.', label='observed signal')
plt.plot(t, problem.estimates[1], label='estimated signal')
plt.legend()
plt.show()
problem.holdout_validation(solver=SOLVER, seed=42)
```
### Sparse Second-Order Difference Heuristic
```
problem = Problem(data=y, components=[GaussNoise, SparseSecondDiffConvex])
problem.weights.value = [1, 1e2]
problem.optimize_weights(solver=SOLVER)
problem.weights.value
plt.figure(figsize=(10, 6))
plt.plot(t, signal1, label='true signal', ls='--')
plt.plot(t, y, alpha=0.1, linewidth=1, marker='.', label='observed signal')
plt.plot(t, problem.estimates[1], label='estimated signal')
plt.legend()
plt.show()
problem.holdout_validation(solver=SOLVER, seed=42)
```
```
import psycopg2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from joblib import dump, load
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import LassoLars
from sklearn.model_selection import RandomizedSearchCV
# Query the modeling_data3 table from the PostgreSQL database
conn = None
try:
    conn = psycopg2.connect(user="cohort17",
                            password="Cohort17Movies",
                            host="moviesdb.ce8d6g1pa5lm.us-east-1.rds.amazonaws.com",
                            port="5432", database="moviesdb")
    dbquery = "select * from modeling_data3"
    movies = pd.read_sql_query(dbquery, conn)
except (Exception, psycopg2.Error) as error:
    print("Error while fetching data from PostgreSQL", error)
finally:
    if conn:  # guard against connect() failing before conn is assigned
        conn.close()
X = movies.drop(['primarytitle','domesticgross'], axis=1)
y = movies['domesticgross']
numeric_features = X[['productionbudget','runtimeminutes','release_year','release_week',
'genre_year_avg_sales','lead_prior_avg_sales','lead_prior_lead_count',
'director_prior_avg_sales','director_prior_count']].columns
dummy_features = X.drop(numeric_features, axis=1).columns
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=0)
impute_numeric = SimpleImputer(missing_values=np.nan, strategy='median', copy=False, fill_value=None)
impute_dummy = SimpleImputer(missing_values=np.nan, strategy='constant', copy=False, fill_value=0)
scale_numeric = MinMaxScaler(copy=False)
numeric_transformer = Pipeline(steps=[
('imputer', impute_numeric),
('scaler', scale_numeric)])
dummy_transformer = Pipeline(steps=[
('imputer', impute_dummy)])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('dum', dummy_transformer, dummy_features)])
gbm = Pipeline(steps=[
('preprocessor', preprocessor),
('regressor', GradientBoostingRegressor(random_state=0))])
gbm.fit(X_train, y_train)
gbm.score(X_test, y_test)
dump(gbm, 'gbm_1.joblib')
#Warning: This cell may take a long time to run, depending on resources
gbm = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', GradientBoostingRegressor())])
param_grid = {'regressor__learning_rate': [.05],
'regressor__n_estimators': [125, 150, 175],
'regressor__subsample': [.8, .9, 1],
'regressor__min_samples_split': [8, 10, 12],
'regressor__min_samples_leaf': [1, 2],
'regressor__max_depth': [3, 4, 5, 6],
'regressor__max_features': ['sqrt']}
CV = RandomizedSearchCV(estimator = gbm,
param_distributions = param_grid,
n_iter = 50,
cv = 12,
verbose = 2,
random_state = 0,
n_jobs = -1)
CV.fit(X_train, y_train)
print(CV.best_params_)
print(CV.best_score_)
CV.score(X_test, y_test)
dump(CV.best_estimator_, 'gbm_newfeatures.joblib')
gbm_random = load('gbm_newfeatures.joblib')
gbm_random.score(X_test, y_test)
gbm_random['regressor']
gbm2 = GradientBoostingRegressor(alpha=0.9, ccp_alpha=0.0, criterion='friedman_mse',
init=None, learning_rate=0.05, loss='ls', max_depth=5,
max_features='sqrt', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=2, min_samples_split=8,
min_weight_fraction_leaf=0.0, n_estimators=125,
n_iter_no_change=None, presort='deprecated',
random_state=None, subsample=1, tol=0.0001,
validation_fraction=0.1, verbose=0, warm_start=False)
gbm = Pipeline(steps=[
('preprocessor', preprocessor),
('regressor', gbm2)])
gbm.fit(X_train, y_train)
gbm.score(X_test, y_test)
dump(gbm, 'gbm_newfeatures.joblib')
#Warning: This cell may take a long time to run, depending on resources
rf = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', RandomForestRegressor())])
param_grid = {'regressor__max_depth': [10, 20, 40, 60, None],
'regressor__max_features': ['sqrt'],
'regressor__min_samples_leaf': [1, 2, 4, 8],
'regressor__min_samples_split': [2, 4, 8, 10],
'regressor__n_estimators': [100, 150, 200]}
CV = RandomizedSearchCV(estimator = rf,
param_distributions = param_grid,
n_iter = 300,
cv = 12,
verbose = 2,
random_state = 0,
n_jobs = -1)
CV.fit(X_train, y_train)
print(CV.best_params_)
print(CV.best_score_)
CV.score(X_test, y_test)
dump(CV.best_estimator_, 'rf_randomsearch.joblib')
```
## Feature Reduction Section (the list of features to remove was obtained from the model_exploration workbook)
```
remove = ['talkinganimals',
'martin_scorsese',
'm_night_shyamalan',
'colin_farrell',
'bruce_willis',
'brad_pitt',
'shawn_levy',
'jackie_chan',
'mark_wahlberg',
'hugh_jackman',
'gerard_butler',
'keanu_reeves',
'nicolas_cage',
'dwayne_johnson',
'steven_spielberg',
'leonardo_dicaprio',
'clint_eastwood',
'jason_statham',
'denzel_washington',
'religious',
'jake_gyllenhaal',
'owen_wilson',
'guy_ritchie',
'matthew_mcconaughey',
'ron_howard',
'david_gordon_green',
'russell_crowe',
'jack_black',
'tom_cruise',
'matt_damon',
'africanamerican',
'michael_bay',
'cate_blanchett',
'tim_burton',
'ben_affleck',
'john_goodman',
'christian_bale',
'george_clooney',
'dysfunctionalfamily',
'mystery',
'tom_hanks',
'will_ferrell',
'stephen_soderbergh',
'robert_de_niro',
'biography',
'ridley_scott',
'paramount',
'romance',
'reese_witherspoon',
'channing_tatum',
'johnny_depp',
'thriller',
'g_rating',
'revenge',
'ice_cube',
'horror',
'samuel_l_jackson',
'sony',
'jim_carrey',
'christopher_nolan',
'pg_rating',
'documentary',
'warner_bros',
'adam_sandler',
'will_smith',
'crime',
'comedy',
'sandra_bullock',
'steve_carell',
'family']
X_reduce = movies.drop(['primarytitle','domesticgross'] + remove, axis=1)
y_reduce = movies['domesticgross']
numeric_features = X_reduce[['productionbudget','runtimeminutes','release_year','release_week']].columns
dummy_features = X_reduce.drop(numeric_features, axis=1).columns
X_reduce_train, X_reduce_test, y_reduce_train, y_reduce_test = train_test_split(X_reduce, y_reduce, test_size=.2, random_state=0)
impute_numeric = SimpleImputer(missing_values=np.nan, strategy='median', copy=False, fill_value=None)
impute_dummy = SimpleImputer(missing_values=np.nan, strategy='constant', copy=False, fill_value=0)
scale_numeric = MinMaxScaler(copy=False)
numeric_transformer = Pipeline(steps=[
('imputer', impute_numeric),
('scaler', scale_numeric)])
dummy_transformer = Pipeline(steps=[
('imputer', impute_dummy)])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('dum', dummy_transformer, dummy_features)])
gbm = Pipeline(steps=[
('preprocessor', preprocessor),
('regressor', GradientBoostingRegressor(random_state=0))])
gbm.fit(X_reduce_train, y_reduce_train)
gbm.score(X_reduce_test, y_reduce_test)
dump(gbm, 'gbm_reduce_2.joblib')
#Warning: This cell may take a long time to run, depending on resources
gbm = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', GradientBoostingRegressor())])
param_grid = {'regressor__learning_rate': [.05],
'regressor__n_estimators': [125, 150, 175, 200],
'regressor__subsample': [.8, .9, 1],
'regressor__min_samples_split': [4, 6, 8, 10, 12],
'regressor__min_samples_leaf': [1, 2, 3],
'regressor__max_depth': [4, 5, 6, 7, 8],
'regressor__max_features': ['sqrt']}
CV = RandomizedSearchCV(estimator = gbm,
param_distributions = param_grid,
n_iter = 150,
cv = 12,
verbose = 2,
random_state = 0,
n_jobs = -1)
CV.fit(X_reduce_train, y_reduce_train)
print(CV.best_params_)
print(CV.best_score_)
CV.score(X_reduce_test, y_reduce_test)
dump(CV.best_estimator_, 'gbm_reduce_randomsearch2.joblib')
feature_importance = CV.best_estimator_.steps[-1][1].feature_importances_
importance = pd.DataFrame({'columns':X_reduce.columns,'importance': feature_importance})
importance.sort_values('importance', ascending=True, inplace=True)
importance.tail(50).plot('columns','importance','barh',figsize=(12,12))
```
```
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://github.com/chenmengjie-xiaomi/ageron_handson-ml/blob/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL,housing_path = HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path,'housing.tgz')
print(tgz_path)
urllib.request.urlretrieve(housing_url,tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path,'housing.csv')
return pd.read_csv(csv_path)
housing = load_housing_data()
housing.head()
#info() gives a quick description of the data
housing.info()
## look at the categorical attribute
housing['ocean_proximity'].value_counts()
##summary of the numerical attributes
housing.describe()##note that null values are ignored here, so the count for total_bedrooms is 20433 rather than 20640
```
The 25% row for housing_median_age is 18, which means that 25% of the districts have a housing_median_age below 18.
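A quick way to confirm this with pandas (a small illustrative addition, not in the original notebook):
```
# The 25% row reported by describe() is simply the 0.25 quantile.
housing['housing_median_age'].quantile(0.25)  # 18.0
```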
```
#another quick way to get a feel for the data is to plot a histogram of each numerical attribute
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50,figsize=(20,15))
plt.show()
```
> Randomly generating the test set usually works well if your dataset is large enough; otherwise it can introduce significant sampling bias, in which case stratified sampling should be used.
```
##1. full dataset
##2. random sampling
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing,test_size=0.2,random_state=42)
##most median incomes cluster around $20k-$50k, but some exceed $60k; each stratum needs a sufficient number of instances
## so divide the median income by 1.5 (to limit the number of income categories), round up with ceil (to get discrete categories), and merge every category above 5 into category 5
import numpy as np
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
housing['income_cat'].where(housing['income_cat'] < 5, 5.0,inplace=True)
#3. stratified sampling
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index, test_index in split.split(housing,housing['income_cat']):
start_train_set = housing.loc[train_index]
start_test_set = housing.loc[test_index]
##check the income category proportions
housing['income_cat'].value_counts()/len(housing)
```
## 数据可视化
```
#drop the income_cat attribute to restore the data to its original state
for set_ in (start_train_set,start_test_set):
    set_.drop(['income_cat'],axis=1,inplace=True)
##make a copy of the training set so we can explore it without harming the original
housing = start_train_set.copy()
### visualizing geographical data
housing.plot(kind='scatter',x='longitude',y='latitude')
#调节透明度,看到高密度区域
housing.plot(kind='scatter',x='longitude',y='latitude',alpha=0.1)
#看人口和房价
housing.plot(kind='scatter',x='longitude',y='latitude',alpha=0.4,
s=housing['population']/100,label='population',
c='median_house_value', cmap=plt.get_cmap('jet'),colorbar=True)
plt.legend()
```
This plot shows that house prices are closely related to location and population density.
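One way to back that visual impression up with numbers is to look at linear correlations with the target; this is a quick sketch restricted to the numeric columns of the training copy built above:
```
# Correlation of each numerical attribute with the median house value
housing.select_dtypes(include=[np.number]).corr()['median_house_value'].sort_values(ascending=False)
```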
```
import numpy as np
import scipy.stats as stats
import pymc3 as pm
import arviz as az
az.style.use('arviz-white')
```
# Probabilistic Programming
This post is based on an excerpt from the second chapter of the book that I have slightly adapted so it's easier to read without having read the first chapter.
## Bayesian Inference
Bayesian statistics is conceptually very simple; we have _the knowns_ and _the unknowns_; we use Bayes' theorem to condition the latter on the former. If we are lucky, this process will reduce the uncertainty about _the unknowns_.
Generally, we refer to _the knowns_ as **data** and treat it like a constant, and __the unknowns__ as **parameters** and treat them as probability distributions. In more formal terms, we assign probability distributions to unknown quantities. Then, we use Bayes' theorem to combine the prior probability distribution and the data to get the posterior distribution.
$$p(\theta \mid y) = \frac{p(y \mid \theta) p(\theta)}{p(y)}$$
Here $\theta$ represents the parameters in our model; generally, these are the quantities we want to learn. $y$ represents the data.
Each term in Bayes' theorem has its own name:
* $p(\theta \mid y)$: posterior
* $p(y \mid \theta)$: likelihood
* $p(\theta)$: prior
* $p(y)$: marginal likelihood
The prior distribution should reflect what we know about the value of the parameter before seeing the data $y$. If we know nothing, like Jon Snow, we could use _flat_ priors that do not convey too much information. In general, we can do better than flat priors.
The likelihood is how we introduce data in our analysis. It is an expression of the plausibility of the data given the parameters.
The posterior distribution is the result of the Bayesian analysis and reflects all that we know about a problem (given our data and model). The posterior is a probability distribution for the parameters in our model and not a single value. Conceptually, we can think of the posterior as the updated prior in the light of (new) data. In fact, the posterior from one analysis can be used as the prior for a new analysis.
The last term is the marginal likelihood, also known as evidence. For the moment we will think of it as a simple normalization factor.
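To make these four terms concrete, here is a minimal numerical sketch of Bayes' theorem for a hypothetical coin that shows 2 heads in 3 tosses (the data and the grid are made up purely for illustration): we evaluate the prior and the likelihood on a grid of $\theta$ values and divide by the normalizing constant.
```
import numpy as np
import scipy.stats as stats

theta_grid = np.linspace(0, 1, 101)
prior = stats.beta(1, 1).pdf(theta_grid)              # p(theta): a flat prior
likelihood = stats.binom(n=3, p=theta_grid).pmf(2)    # p(y | theta) for 2 heads in 3 tosses
unnormalized = prior * likelihood
posterior = unnormalized / np.trapz(unnormalized, theta_grid)  # divide by the marginal likelihood
```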
## Probabilistic Programming: Inference-Button
Although conceptually simple, fully probabilistic models often lead to analytically intractable expressions. For many years, this was a real problem and was probably one of the main issues that hindered the wide adoption of Bayesian methods. The arrival of the computational era and the development of numerical methods that, at least in principle, can be used to solve any inference problem, has dramatically transformed the Bayesian data analysis practice. We can think of these numerical methods as universal inference engines, or as Thomas Wiecki, a core developer of PyMC3, likes to call it, the inference-button. The possibility of automating the inference process has led to the development of **probabilistic programming languages (PPL)**, which allow for a clear separation between model creation and inference.
In the PPL framework, users specify a full probabilistic model by writing a few lines of code, and then inference follows automatically. It is expected that probabilistic programming will have a major impact on data science and other disciplines by enabling practitioners to build complex probabilistic models in a less time-consuming and less error-prone way.
I think one good analogy for the impact that programming languages can have on scientific computing is the introduction of the Fortran programming language more than six decades ago. While Fortran has lost its shine nowadays, at one time, it was considered to be very revolutionary. For the first time, scientists moved away from computational details and began focusing on building numerical methods, models, and simulations in a more natural way. In a similar fashion, we now have PPL, which hides details on how probabilities are manipulated and how the inference is performed from users, allowing users to focus on model specification and the analysis of the results.
In this post, you will learn how to use PyMC3 to define and solve a simple model. We will treat the inference-button as a black box that gives us proper **samples** from the posterior distribution. The methods we will be using are stochastic, and so the samples will vary every time we run them. However, if the inference process works as expected, the samples will be representative of the posterior distribution and thus we will obtain the same conclusion from any of those samples. The details of what happens under the hood when we push the inference-button and how to check if the samples are indeed trustworthy is explained in Chapter 8, _Inference Engines_.
## PyMC3 primer
PyMC3 is a Python library for probabilistic programming. The latest version at the moment of writing is 3.6. PyMC3 provides a very simple and intuitive syntax that is easy to read and close to the syntax used in statistical literature to describe probabilistic models. PyMC3's base code is written using Python, and the computationally demanding parts are written using NumPy and Theano.
**Theano** is a Python library that was originally developed for deep learning and allows us to define, optimize, and evaluate mathematical expressions involving multidimensional arrays efficiently. The main reason PyMC3 uses Theano is because some of the sampling methods, such as NUTS, need gradients to be computed, and Theano knows how to compute gradients using what is known as automatic differentiation. Also, Theano compiles Python code to C code, and hence PyMC3 is really fast. This is all the information about Theano we need to have to use PyMC3. If you still want to learn more about it, start reading the official Theano tutorial at http://deeplearning.net/software/theano/tutorial/index.html#tutorial.
> Note: You may have heard that Theano is no longer developed, but that's no reason to worry. PyMC devs will take over Theano maintenance, ensuring that Theano will keep serving PyMC3 for several years to come. At the same time, PyMC devs are moving quickly to create the successor to PyMC3. This will probably be based on TensorFlow as a backend, although other options are being analyzed as well. You can read more about this at this [blog post](https://medium.com/@pymc_devs/theano-tensorflow-and-the-future-of-pymc-6c9987bb19d5).
### The coin-flipping problem
The coin-flipping problem, or the beta-binomial model if you want to sound fancy at parties, is a classical problem in statistics and goes like this: we toss a coin a number of times and record how many heads and tails we get. Based on this data, we try to answer questions such as, is the coin fair? Or, more generally, how biased is the coin?
While this problem may sound dull, we should not underestimate it. The coin-flipping problem is a great example to learn the basics of Bayesian statistics because it is a simple model that we can solve and compute with ease. Besides, many real problems consist of binary, mutually-exclusive outcomes such as 0 or 1, positive or negative, odds or evens, spam or ham, hotdog or not hotdog, cat or dog, safe or unsafe, and healthy or unhealthy. Thus, even when we are talking about coins, this model applies to any of those problems.
In order to estimate the bias of a coin, and in general to answer any questions in a Bayesian
setting, we will need data and a probabilistic model. We are going to generate the data using Python, but you can also generate the data yourself using a real coin!
```
np.random.seed(123)
trials = 4
theta_real = 0.35 # unknown value in a real experiment
data = stats.bernoulli.rvs(p=theta_real, size=trials)
```
### Model specification
Now that we have the data, we need to specify the model. This is done by specifying the likelihood and the prior using probability distributions. For the likelihood, we will use the binomial distribution with parameters $n=1$ and $p=\theta$, and for the prior, a beta distribution with parameters $\alpha=\beta=1$:
We can write the model using the following mathematical notation:
$$
\theta \sim \mathop{Beta}(\alpha, \beta) \\
y \sim \mathop{Bern}(n=1, p=\theta)
$$
A justification of this model is discussed in Chapter 1, but briefly we can justify it as follows. Coins can take only two values, heads and tails, thus we can use a Bernoulli distribution (a binomial with $n=1$), as this distribution models two mutually exclusive outcomes such as heads (1) or tails (0). We use a beta distribution with parameters $\alpha=\beta=1$ because this is equivalent to a uniform distribution on the interval [0, 1]. That is, we are totally ignorant of the value $\theta$ can take, beyond it being some number between 0 and 1. If instead you have reasons to think the value should be around 0.5, you could use a beta distribution with parameters $\alpha=\beta=2$ or $\alpha=\beta=20$; I suggest that once you have read this post you try those priors and see how the inference changes.
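As a quick way to see what those alternative priors look like before trying them, here is a small sketch plotting the three beta distributions mentioned above (matplotlib is assumed to be available, even though it is not imported elsewhere in this post):
```
import matplotlib.pyplot as plt

theta_grid = np.linspace(0, 1, 200)
for a in (1, 2, 20):
    plt.plot(theta_grid, stats.beta(a, a).pdf(theta_grid), label=f'Beta({a}, {a})')
plt.xlabel('θ')
plt.ylabel('density')
plt.legend();
```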
This statistical model has an almost one-to-one translation to PyMC3:
```
with pm.Model() as our_first_model:
θ = pm.Beta('θ', alpha=1., beta=1.)
y = pm.Bernoulli('y', p=θ, observed=data)
trace = pm.sample(1000, random_seed=123)
```
* The first line of the code creates a container for our model. Everything inside the `with-block` will be automatically added to `our_first_model`. You can think of this as syntactic sugar to ease model specification as we do not need to manually assign variables to the model.
* The second line specifies the prior. As you can see, the syntax follows the mathematical notation closely.
> Please note that we’ve used the name `θ` twice, first as a Python variable and then as the first argument of the Beta function; using the same name is a good practice to avoid confusion. The `θ` variable is a random variable; it is not a number, but an object representing a probability distribution from which we can compute random numbers and probability densities.
* The third line specifies the likelihood. The syntax is almost the same as for the prior, except that we pass the data using the `observed` argument. This is the way we tell PyMC3 we want to condition the unknowns ($\theta$) on the known (`data`). The observed values can be passed as a Python list, a tuple, a NumPy array, or a pandas DataFrame.
Now, we are finished with the model's specification! Pretty neat, right?
### Pushing the inference button
The last line is the inference button. We are asking for 1,000 samples from the posterior and will store them in the `trace` object. Behind this innocent line, PyMC3 has hundreds of _oompa loompas_, singing and baking a delicious Bayesian inference just for you! Well, not exactly, but PyMC3 is automating a lot of tasks. If you run the code, you will get a message like this:
Auto-assigning NUTS sampler...
Initializing NUTS using jitter+adapt_diag...
Multiprocess sampling (2 chains in 2 jobs)
NUTS: [θ]
Sampling 2 chains: 100%|██████████| 3000/3000 [00:00<00:00, 3017.95draws/s]
The first and second lines tell us that PyMC3 has automatically assigned the NUTS sampler (one inference engine that works very well for continuous variables), and has used a method to initialize that sampler.
The third line says that PyMC3 will run two chains in parallel, so we can get two independent samples from the posterior for the price of one. The exact number of chains is computed taking into account the number of processors in your machine; you can change it using the `chains` argument for the `sample` function.
The next line is telling us which variables are being sampled by which sampler. For this particular case, this line is not adding any new information because NUTS is used to sample the only variable we have `θ`. However, this is not always the case as PyMC3 can assign different samplers to different variables. This is done automatically by PyMC3 based on the properties of the variables, which ensures that the best possible sampler is used for each variable. Users can manually assign samplers using the step argument of the sample function.
Finally, the last line is a progress bar, with several related metrics indicating how fast the sampler is working, including the number of iterations per second. If you run the code, you will see the progress-bar get updated really fast. Here, we are seeing the last stage when the sampler has finished its work. The numbers are 3000/3000, where the first number is the running sampler number (this starts at 1), and the last is the total number of samples. You will notice that we have asked for 1,000 samples, but PyMC3 is computing 3,000 samples. We have 500 samples per chain to auto-tune the sampling algorithm (NUTS, in this example).
These samples will be discarded by default. We also have 1,000 productive draws per-chain, thus a total of 3,000 samples are generated. The tuning phase helps PyMC3 provide a reliable sample from the posterior. We can change the number of tuning steps with the `tune` argument of the sample function.
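For illustration, both knobs mentioned above are just keyword arguments of `pm.sample`; a variant of the call (not run in this post, `trace_alt` is only an illustrative name) that asks for four chains and 2,000 tuning steps per chain would look like this:
```
with our_first_model:
    trace_alt = pm.sample(1000, chains=4, tune=2000, random_seed=123)
```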
### Summarizing the posterior
Generally, the first task we will perform after sampling from the posterior is to check what the results look like. The `plot_trace` function is one of many ArviZ functions we can use for this task:
```
az.plot_trace(trace);
```
By using `az.plot_trace`, we get two subplots for each unobserved variable. The only unobserved variable in `our_first_model` is $\theta$. Notice that $y$ is an observed variable representing the data; we do not need to sample that because we already know those values.
In the above figure, we have two subplots. On the left, we have a Kernel Density Estimation (KDE) plot; this is like a smooth version of a histogram. On the right, we get the individual sampled values at each step during the sampling. From the trace plot, we can visually get the plausible values of the parameters according to the posterior distribution.
Another plot we can make with ArviZ is a posterior plot.
```
az.plot_posterior(trace, round_to=2);
```
We can interpret this result as follows: on average, the value of $\theta$ is 0.33, which means the coin is most likely biased towards tails, as we used 0 to represent tails and 1 to represent heads. This estimate is pretty close to the real value of $\theta=0.35$, the value we used to generate the synthetic data, so we can see that PyMC3 and our model have provided a reasonable answer. Of course, for real examples we do not know the _true_ value of the parameters; that's the whole point of doing inference in the first place.
As we can see from this example, we did not get a single number for $\theta$; we got a distribution of plausible values. This distribution represents the uncertainty in our estimate. There are many ways to express the uncertainty of a Bayesian analysis; one is to use a **Highest Posterior Density** (HPD) interval, as in the previous plot. For this particular example, the HPD interval says that 94% of the plausible values are contained within the 0.02-0.64 range.
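A numerical counterpart to the plot above is `az.summary`, which reports, among other things, the posterior mean and the bounds of that interval (the exact column names vary a little between ArviZ versions):
```
az.summary(trace, round_to=2)
```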
Getting the posterior is not the end of a Bayesian analysis. As with other forms of modeling, a Bayesian analysis is an iterative process and is motivated by a particular context and set of questions. But I hope this simple example has helped you learn more about Bayesian analysis.
# Information Theory Measures w/ RBIG
```
import sys
# MacOS
sys.path.insert(0, '/Users/eman/Documents/code_projects/rbig/')
sys.path.insert(0, '/home/emmanuel/code/py_packages/py_rbig/src')
# ERC server
sys.path.insert(0, '/home/emmanuel/code/rbig/')
import numpy as np
import warnings
from time import time
from rbig.rbig import RBIGKLD, RBIG, RBIGMI, entropy_marginal
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
warnings.filterwarnings('ignore') # get rid of annoying warnings
%load_ext autoreload
%autoreload 2
```
---
## Total Correlation
```
#Parameters
n_samples = 10000
d_dimensions = 10
seed = 123
rng = check_random_state(seed)
```
#### Sample Data
```
# Generate random normal data
data_original = rng.randn(n_samples, d_dimensions)
# Generate random Data
A = rng.rand(d_dimensions, d_dimensions)
data = data_original @ A
# covariance matrix
C = A.T @ A
vv = np.diag(C)
```
#### Calculate Total Correlation
```
tc_original = np.log(np.sqrt(vv)).sum() - 0.5 * np.log(np.linalg.det(C))
print(f"TC: {tc_original:.4f}")
```
### RBIG - TC
```
%%time
n_layers = 10000
rotation_type = 'PCA'
random_state = 0
zero_tolerance = 60
pdf_extension = 10
pdf_resolution = None
tolerance = None
# Initialize RBIG class
tc_rbig_model = RBIG(n_layers=n_layers,
rotation_type=rotation_type,
random_state=random_state,
zero_tolerance=zero_tolerance,
tolerance=tolerance,
pdf_extension=pdf_extension,
pdf_resolution=pdf_resolution)
# fit model to the data
tc_rbig_model.fit(data);
tc_rbig = tc_rbig_model.mutual_information * np.log(2)
print(f"TC (RBIG): {tc_rbig:.4f}")
print(f"TC: {tc_original:.4f}")
```
---
## Entropy
#### Sample Data
```
#Parameters
n_samples = 5000
d_dimensions = 10
seed = 123
rng = check_random_state(seed)
# Generate random normal data
data_original = rng.randn(n_samples, d_dimensions)
# Generate random Data
A = rng.rand(d_dimensions, d_dimensions)
data = data_original @ A
```
#### Calculate Entropy
```
Hx = entropy_marginal(data)
H_original = Hx.sum() + np.log2(np.abs(np.linalg.det(A)))
H_original *= np.log(2)
print(f"H: {H_original:.4f}")
```
### Entropy RBIG
```
%%time
n_layers = 10000
rotation_type = 'PCA'
random_state = 0
zero_tolerance = 60
pdf_extension = None
pdf_resolution = None
tolerance = None
# Initialize RBIG class
ent_rbig_model = RBIG(n_layers=n_layers,
rotation_type=rotation_type,
random_state=random_state,
zero_tolerance=zero_tolerance,
tolerance=tolerance)
# fit model to the data
ent_rbig_model.fit(data);
H_rbig = ent_rbig_model.entropy(correction=True) * np.log(2)
print(f"Entropy (RBIG): {H_rbig:.4f}")
print(f"Entropy: {H_original:.4f}")
```
---
## Mutual Information
#### Sample Data
```
#Parameters
n_samples = 10000
d_dimensions = 10
seed = 123
rng = check_random_state(seed)
# Generate random Data
A = rng.rand(2 * d_dimensions, 2 * d_dimensions)
# Covariance Matrix
C = A @ A.T
mu = np.zeros((2 * d_dimensions))
dat_all = rng.multivariate_normal(mu, C, n_samples)
CX = C[:d_dimensions, :d_dimensions]
CY = C[d_dimensions:, d_dimensions:]
X = dat_all[:, :d_dimensions]
Y = dat_all[:, d_dimensions:]
```
#### Calculate Mutual Information
```
H_X = 0.5 * np.log(2 * np.pi * np.exp(1) * np.abs(np.linalg.det(CX)))
H_Y = 0.5 * np.log(2 * np.pi * np.exp(1) * np.abs(np.linalg.det(CY)))
H = 0.5 * np.log(2 * np.pi * np.exp(1) * np.abs(np.linalg.det(C)))
mi_original = H_X + H_Y - H
mi_original *= np.log(2)
print(f"MI: {mi_original:.4f}")
```
### RBIG - Mutual Information
```
%%time
n_layers = 10000
rotation_type = 'PCA'
random_state = 0
zero_tolerance = 60
tolerance = None
# Initialize RBIG class
rbig_model = RBIGMI(n_layers=n_layers,
rotation_type=rotation_type,
random_state=random_state,
zero_tolerance=zero_tolerance,
tolerance=tolerance)
# fit model to the data
rbig_model.fit(X, Y);
H_rbig = rbig_model.mutual_information() * np.log(2)
print(f"MI (RBIG): {H_rbig:.4f}")
print(f"MI: {mi_original:.4f}")
```
---
## Kullback-Leibler Divergence (KLD)
#### Sample Data
```
#Parameters
n_samples = 10000
d_dimensions = 10
mu = 0.4 # how different the distributions are
seed = 123
rng = check_random_state(seed)
# Generate random Data
A = rng.rand(d_dimensions, d_dimensions)
# covariance matrix
cov = A @ A.T
# Normalize cov mat
cov = A / A.max()
# create covariance matrices for x and y
cov_x = np.eye(d_dimensions)
cov_y = cov_x.copy()
mu_x = np.zeros(d_dimensions) + mu
mu_y = np.zeros(d_dimensions)
# generate multivariate gaussian data
X = rng.multivariate_normal(mu_x, cov_x, n_samples)
Y = rng.multivariate_normal(mu_y, cov_y, n_samples)
```
#### Calculate KLD
```
kld_original = 0.5 * ((mu_y - mu_x) @ np.linalg.inv(cov_y) @ (mu_y - mu_x).T +
np.trace(np.linalg.inv(cov_y) @ cov_x) -
np.log(np.linalg.det(cov_x) / np.linalg.det(cov_y)) - d_dimensions)
print(f'KLD: {kld_original:.4f}')
```
### RBIG - KLD
```
X.min(), X.max()
Y.min(), Y.max()
%%time
n_layers = 100000
rotation_type = 'PCA'
random_state = 0
zero_tolerance = 60
tolerance = None
pdf_extension = 10
pdf_resolution = None
verbose = 0
# Initialize RBIG class
kld_rbig_model = RBIGKLD(n_layers=n_layers,
rotation_type=rotation_type,
random_state=random_state,
zero_tolerance=zero_tolerance,
tolerance=tolerance,
pdf_resolution=pdf_resolution,
pdf_extension=pdf_extension,
verbose=verbose)
# fit model to the data
kld_rbig_model.fit(X, Y);
# Save KLD value to data structure
kld_rbig= kld_rbig_model.kld*np.log(2)
print(f'KLD (RBIG): {kld_rbig:.4f}')
print(f'KLD: {kld_original:.4f}')
```
# Analyzing IMDB Data in Keras
```
# Imports
import numpy as np
import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
```
## 1. Loading the data
This dataset comes preloaded with Keras, so one simple command will get us training and testing data. There is a parameter for how many words we want to look at. We've set it at 1000, but feel free to experiment.
```
# Loading the data (it's preloaded in Keras)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)
print(x_train.shape)
print(x_test.shape)
```
## 2. Examining the data
Notice that the data has already been pre-processed: every word has been mapped to a number, and each review comes in as a vector of the numbers for the words it contains. For example, if the word 'the' is the first one in our dictionary, and a review contains the word 'the', then there is a 1 in the corresponding vector.
The output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review, and 0 is negative.
```
print(x_train[0])
print(y_train[0])
```
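To make the encoding above less abstract, here is a small sketch that maps the first few indices of the first review back to words using `imdb.get_word_index`, with the standard offset of 3 that `load_data` applies for the reserved padding/start/unknown indices (`index_to_word` is just an illustrative name):
```
# Decode the first 20 word indices of the first review back into words
word_index = imdb.get_word_index()
index_to_word = {index + 3: word for word, index in word_index.items()}
print(' '.join(index_to_word.get(i, '?') for i in x_train[0][:20]))
```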
## 3. One-hot encoding the output
Here, we'll turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.
```
# One-hot encoding the output into vector mode, each of length 1000
tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print(x_train[0])
```
And we'll also one-hot encode the output.
```
# One-hot encoding the output
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train.shape)
print(y_test.shape)
```
## 4. Building the model architecture
Build a model here using Sequential. Feel free to experiment with different layers and sizes! Also, experiment with adding dropout to reduce overfitting.
```
# Building the model architecture: one hidden layer with 512 units
model = Sequential()
model.add(Dense(512, activation='relu', input_dim=1000))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
# Compiling the model using categorical_crossentropy loss, and rmsprop optimizer.
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
```
## 5. Training the model
Run the model here. Experiment with different batch sizes and numbers of epochs!
```
# Running and evaluating the model
hist = model.fit(x_train, y_train,
batch_size=32,
epochs=10,
validation_data=(x_test, y_test),
verbose=2)
```
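Since the `fit` call above stores its history in `hist`, a quick sketch of the accuracy curves can help judge those batch-size and epoch experiments (the metric key is `'acc'` in older Keras releases and `'accuracy'` in newer ones, so it is looked up defensively):
```
# Plot training vs. validation accuracy per epoch
acc_key = 'acc' if 'acc' in hist.history else 'accuracy'
plt.plot(hist.history[acc_key], label='train')
plt.plot(hist.history['val_' + acc_key], label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend();
```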
## 6. Evaluating the model
This will give you the accuracy of the model, as evaluated on the testing set. Can you get something over 85%?
```
score = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: ", score[1])
```
```
%matplotlib inline
import numpy as np
import pandas as pd
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import math
from matplotlib.mlab import PCA as mlabPCA
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
import seaborn as sns
import scipy.stats as stats
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, KFold
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
from sklearn.decomposition import PCA as sklearn_pca
import locale
from locale import atof
import warnings
from IPython.display import display
from sklearn import linear_model
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.feature_selection import f_regression
import statsmodels.formula.api as smf
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import pickle
from sklearn.cross_decomposition import PLSRegression
```
**Load new dataset**
```
#Load data from the Excel spreadsheet into pandas
xls_file = pd.ExcelFile('D:\\Users\\Borja.gonzalez\\Desktop\\Thinkful-DataScience-Borja\\Test_fbidata2014.xlsx')
# View the excel file's sheet names
#xls_file.sheet_names
# Load the xls file's 14tbl08ny as a dataframe
testfbi2014 = xls_file.parse('14tbl08ny')
```
**Clean and prepare the new dataset**
```
#Transform FBI Raw Data
#Rename columns with row 3 from the original data set
testfbi2014 = testfbi2014.rename(columns=testfbi2014.iloc[3])
#Drop the first four rows, which do not contain data for the regression model
testfbi2014 = testfbi2014.drop(testfbi2014.index[0:4])
#Drop the City and Arson columns and the columns containing "Rape"
testfbi2014 = testfbi2014.drop(['City','Arson3','Rape\n(revised\ndefinition)1','Rape\n(legacy\ndefinition)2'], axis = 1)
#Change names in Columns
testfbi2014 = testfbi2014.rename(columns={'Violent\ncrime': 'Violent Crime', 'Murder and\nnonnegligent\nmanslaughter': 'Murder', 'Robbery': 'Robbery', 'Aggravated\nassault': 'Assault', 'Property\ncrime': 'PropertyCrime', 'Burglary': 'Burglary', 'Larceny-\ntheft': 'Larceny & Theft', 'Motor\nvehicle\ntheft': 'MotorVehicleTheft'})
#Clean NaN values from dataset and reset index
testfbi2014 = testfbi2014.dropna().reset_index(drop=True)
#Check that all columns can be cast to floats (astype returns a copy; this call only prints the info)
testfbi2014.astype('float64').info()
#Scale and preprocess the dataset
names = testfbi2014.columns
fbi2014_scaled = pd.DataFrame(preprocessing.scale(testfbi2014), columns = names)
```
**Import the model from Challenge: make your own regression model**
```
# load the model from disk
filename = 'finalized_regr.sav'
loaded_model = pickle.load(open(filename, 'rb'))
# Inspect the results.
print('\nCoefficients: \n', loaded_model.coef_)
print('\nIntercept: \n', loaded_model.intercept_)
print('\nR-squared:')
# Note: X and Y are the training features/target from the notebook where this model was fit;
# they must already be defined in this session for the two lines below to run.
print(loaded_model.score(X, Y))
print('\nVariables in the model: \n',list(X.columns))
```
**Cross Validation & Predictive Power of the "Challenge: make your own regression model" model**
```
X1 = fbi2014_scaled.drop(['Violent Crime','Murder','Larceny & Theft','PropertyCrime','MotorVehicleTheft','Assault'],axis=1)
Y1 = fbi2014_scaled['PropertyCrime'].values.ravel()
#Initiate the cross-validation generator with 20 folds
kf = KFold(20)
#Cross validate the model on the folds
loaded_model.fit(X1,Y1)
scores = cross_val_score(loaded_model, X1, Y1, cv=kf)
print('Cross-validated scores:', scores)
print('Cross-validation average:', scores.mean())
#Predictive accuracy
predictions = cross_val_predict(loaded_model, X1, Y1, cv=kf)
accuracy = metrics.r2_score(Y1, predictions)
print ('Cross-Predicted Accuracy:', accuracy)
# Instantiate and fit our model.
regr1 = linear_model.LinearRegression()
regr1.fit(X1, Y1)
# Inspect the results.
print('\nCoefficients: \n', regr1.coef_)
print('\nIntercept: \n', regr1.intercept_)
print('\nVariables in the model: \n',list(X1.columns))
#Cross validate the new model on the folds
scores = cross_val_score(regr1, X1, Y1, cv=kf)
print('Cross-validated scores:', scores)
print('Cross-validation average:', scores.mean())
#Cross validation, scores
predictions = cross_val_predict(regr1, X1, Y1, cv=kf)
accuracy = metrics.r2_score(Y1, predictions)
print ('Cross-Predicted Accuracy:', accuracy)
# Fit a linear model using Partial Least Squares Regression.
# Reduce feature space to 2 dimensions.
pls1 = PLSRegression(n_components=2)
# Reduce X to R(X) and regress on y.
pls1.fit(X1, Y1)
# Save predicted values.
PLS_predictions = pls1.predict(X1)
print('R-squared PLSR:', pls1.score(X1, Y1))
print('R-squared LR:', scores.mean())
# Compare the predictions of the two models
plt.scatter(predictions,PLS_predictions)
plt.xlabel('Predicted by original 3 features')
plt.ylabel('Predicted by 2 features')
plt.title('Comparing LR and PLSR predictions')
plt.show()
```
**Conclusions & additional notes**
- The model was tested through cross-validation until a 79% score was achieved using FBI data for 2013.
- Results from the cross-validation training and feature selection can be seen in Challenge: make your own regression model.
- The new 2014 FBI data has been scaled and passed through the best model achieved in Challenge: make your own regression model.
- The predictive capacity of the model is 94% and the cross-validation score is 85%. This is the highest R² I have been able to achieve through cross-validation while training the model.
## Introduction
When I first started learning matplotlib, it seemed as if there was an infinite number of ways to do the same set of tasks. Searching for tutorials could present you with a collection of lessons, each achieving roughly the same goal, but doing so in a slightly different manner each time. I was being productive with matplotlib, but I didn't feel like I was getting any closer to really understanding how the library worked. The reason for my uneasiness was largely due to the fact that matplotlib has three different interfaces to choose from, each with its own set of pros and cons and special use cases.
In this lesson, we'll discuss the reason for the existence of each interface. We'll learn how to choose the right interface for the job. And, finally, we'll see an example of each interface in action.
Personally, I feel it's easiest to work from the top to the bottom, so we'll work our way inward from the interface that offers the highest-level of abstraction to the lowest. With that in mind, we'll begin by exploring the pylab interface.
## pylab
If you remember at the beginning of the course, I mentioned that matplotlib was originally created to make Python a viable alternative to Matlab. Given this goal, the author, John Hunter, set out to create an interface that would very closely match that of the Matlab language. The interface he created was called pylab and it provided a nearly one-to-one mapping of the procedurally-based, and stateful, Matlab interface. The major benefit of this interface is that it made it possible for Matlab devotees to make the switch to Python with relative ease. Though the interface has since been deprecated in favor of the pyplot interface, given that it puts everything you need right at your fingertips, and is less verbose than the other interfaces, I would argue that if you want to just pop into a python interpreter and do a quick "one off", interactive EDA session, it's still a good fit for the job.
The main problem, however, with the pylab interface is that it imports everything into the global namespace. This can cause issues with other user defined, or imported, functions eclipsing matplotlib functionality. It also obscures your code since it's not immediately obvious whether a function call comes from matplotlib or, for example, its dependent library, NumPy. For this reason, the pyplot module is now considered to be the canonical way to interactively explore data with matplotlib.
## pyplot
The idea behind the pyplot interface is that, even though the approach taken by pylab doesn’t follow good software engineering practices, users, nonetheless, still need a lightweight way to interact with matplotlib. The difference between pylab and pyplot is that pylab imports everything it uses into the global namespace making everything seem a bit “magical”, whereas pyplot makes it explicit where each function used in a script comes from. The pyplot approach leads to much easier to understand, and therefore, more maintainable code. As such, the pyplot interface is the preferred way to interactively explore a data set, and is now the interface used in the majority of tutorials that you'll find online. Also, just recently, the matplotlib documentation was overhauled and now, pretty consistently, uses pyplot everywhere.
Where the pyplot interface breaks down, however, is when you need more control over how your plots are created. pyplot provides a state machine like interface that purposefully obscures away the details of what classes are being instantiated and which instances are being modified with each function call. This is great when doing exploratory data analysis, but can be a bit limiting when writing scripts to process large amounts of data, or when embedding matplotlib into an application. In either of these cases, you'll need to drop down into matplotlib's object-oriented API.
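A small sketch of what that state machine is doing behind the scenes: `plt.gcf()` and `plt.gca()` hand you the Figure and Axes objects that pyplot has been tracking implicitly, which is also the usual escape hatch down to the object-oriented API described next.
```
import matplotlib.pyplot as plt

plt.plot([1, 2, 3], [1, 4, 9])   # implicitly creates the current Figure and Axes
fig = plt.gcf()                  # the Figure the state machine is tracking
ax = plt.gca()                   # the Axes the state machine is tracking
ax.set_title('Title set through the underlying Axes object')
```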
## The Object-Oriented API
The pylab and pyplot interfaces are simply lightweight abstractions built atop matplotlib's set of classes for creating graphics. Calling a function like `plot` from either interface will first check for existing objects to modify, and then create them as needed. If you need more control over when classes are instantiated and how they're modified, however, then you're going to need to use the object-oriented API.
## Examples
Now that you understand the impetus behind each interface, its pros and cons, and when to use it, it's time to get a little taste of each one in action. We'll start with the Object-Oriented API and work our way up to the highest level of abstraction so you can easily see what each layer adds to the previous one.
Now, one note before we continue, you can safely ignore the code in this first cell, it's here mainly just to make sure that our plots look consistent across each example.
```
%matplotlib inline
# Tweaking the 'inline' config a bit to make sure each bit of
# code below displays the same plot.
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [4, 4]
mpl.rcParams['figure.subplot.left'] = 0
mpl.rcParams['figure.subplot.bottom'] = 0
mpl.rcParams['figure.subplot.right'] = 1
mpl.rcParams['figure.subplot.top'] = 1
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
# The current version of NumPy available from conda is issuing a warning
# message that some behavior will change in the future when used with the
# current version of matplotlib available from conda. This cell just keeps
# that warning from being displayed.
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
```
## Object-Oriented API
```
from IPython.display import display_png
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import numpy as np
# Define the size of the figure to prevent the spread
# of the data from looking elliptical
fig = Figure(figsize=(5, 5))
# We've chosen the Agg canvas to render PNG output
canvas = FigureCanvasAgg(fig)
# Generate some random (normally distributed) data using the NumPy library
x = np.random.randn(1000)
y = np.random.randn(1000)
# Create a new Axes object using the subplot function from the Figure object
ax = fig.add_subplot(111)
# Set the x and y axis limits to 4 standard deviations from the mean
ax.set_xlim([-4, 4])
ax.set_ylim([-4, 4])
# The Axes method hist would generate a histogram here ("100" means 100 bins);
# it is left commented out below in favor of a scatter plot of x against y.
#ax.hist(x, 100)
ax.scatter(x, y)
# Decorate the figure with a title.
ax.set_title('Normally distributed data with $\mu=0, \sigma=1$')
# Display the figure as PNG
display_png(fig);
```
## The Scripting Interface (pyplot)
```
import matplotlib.pyplot as plt
import numpy as np
x = np.random.randn(1000)
y = np.random.randn(1000)
# The creation of Figure and Axes objects is taken care of for us
plt.figure(figsize=(5, 5))
plt.scatter(x, y)
plt.xlim(-4, 4)
plt.ylim(-4, 4)
plt.title('Normally distributed data with $\mu=0, \sigma=1$');
```
## The MATLAB Interface (pylab)
```
from pylab import *
# Even functions from the inner modules of NumPy are
# made to be global
x = randn(1000)
y = randn(1000)
figure(figsize=(5, 5))
scatter(x, y)
xlim(-4, 4)
ylim(-4, 4)
title('Normally distributed data with $\mu=0, \sigma=1$');
```
## Conclusion
In this lesson, we learned about the different options you have for interacting with matplotlib. We discussed the pros and cons of each interface and when it's appropriate to use each one. And, finally, we got to compare each one through a simple example coded up in each interface. You should now be prepared to understand any of the tutorials or documentation that you run into when trying to further expand your knowledge of matplotlib.
```
"""
This script produces Figure 11 from Amaral+2021: the Pearson
correlation between stellar mass, planetary mass, and
surface water loss percentage.
@author: Laura N. R. do Amaral, Universidad Nacional Autónoma de México, 2021
@email: laura.nevesdoamaral@gmail.com
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels as st
import seaborn as sns
import matplotlib as mpl
from matplotlib import cm
from collections import OrderedDict
import sys
import os
import subprocess
plt.style.use('ggplot')
try:
import vplot as vpl
except:
print('Cannot import vplot. Please install vplot.')
# Check correct number of arguments
if (len(sys.argv) != 2):
print('ERROR: Incorrect number of arguments.')
print('Usage: '+sys.argv[0]+' <pdf | png>')
exit(1)
if (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):
print('ERROR: Unknown file format: '+sys.argv[1])
print('Options are: pdf, png')
exit(1)
f1 = './dataframe1.txt'
f2 = './dataframe2.txt'
f3 = './dataframe3.txt'
f4 = './dataframe4.txt'
w1 = np.genfromtxt(f1, usecols=1 ,unpack=True) # water initial
x1 = np.genfromtxt(f1, usecols=3 ,unpack=True) # water final
y1 = np.genfromtxt(f1, usecols=0 ,unpack=True) # stellar mass
z1 = np.genfromtxt(f1, usecols=2 ,unpack=True) # planetary mass
w2 = np.genfromtxt(f2, usecols=1 ,unpack=True) # water initial
x2 = np.genfromtxt(f2, usecols=3 ,unpack=True) # water final
y2 = np.genfromtxt(f2, usecols=0 ,unpack=True) # stellar mass
z2 = np.genfromtxt(f2, usecols=2 ,unpack=True) # planetary mass
w3 = np.genfromtxt(f3, usecols=1 ,unpack=True) # water initial
x3 = np.genfromtxt(f3, usecols=3 ,unpack=True) # water final
y3 = np.genfromtxt(f3, usecols=0 ,unpack=True) # stellar mass
z3 = np.genfromtxt(f3, usecols=2 ,unpack=True) # planetary mass
w4 = np.genfromtxt(f4, usecols=1 ,unpack=True) # water initial
x4 = np.genfromtxt(f4, usecols=3 ,unpack=True) # water final
y4 = np.genfromtxt(f4, usecols=0 ,unpack=True) # stellar mass
z4 = np.genfromtxt(f4, usecols=2 ,unpack=True) # planetary mass
a1 = ((w1-x1)/w1)*100
w1.tolist()
x1.tolist()
y1.tolist()
z1.tolist()
a1.tolist()
a2 = ((w2-x2)/w2)*100
w2.tolist()
x2.tolist()
y2.tolist()
z2.tolist()
a2.tolist()
a3 = ((w3-x3)/w3)*100
w3.tolist()
x3.tolist()
y3.tolist()
z3.tolist()
a3.tolist()
a4 = ((w4-x4)/w4)*100
w4.tolist()
x4.tolist()
y4.tolist()
z4.tolist()
a4.tolist()
dataset1 = {
'W1':w1,
'X1':x1,
'Y1':y1,
'Z1':z1,
'A1':a1
}
dataset2= {
'W2':w2,
'X2':x2,
'Y2':y2,
'Z2':z2,
'A2':a2
}
dataset3 = {
'W3':w3,
'X3':x3,
'Y3':y3,
'Z3':z3,
'A3':a3
}
dataset4 = {
'W4':w4,
'X4':x4,
'Y4':y4,
'Z4':z4,
'A4':a4
}
dataset1 = pd.DataFrame(dataset1)
dataset2 = pd.DataFrame(dataset2)
dataset3 = pd.DataFrame(dataset3)
dataset4 = pd.DataFrame(dataset4)
dataset1.corr()
dataset2.corr()
dataset3.corr()
dataset4.corr()
xyz1 = np.array([y1,z1,a1])
corr_matrix1 = np.corrcoef(xyz1).round(decimals=4)
corr_matrix1
xyz2 = np.array([y2,z2,a2])
corr_matrix2 = np.corrcoef(xyz2).round(decimals=4)
corr_matrix2
xyz3 = np.array([y3,z3,a3])
corr_matrix3 = np.corrcoef(xyz3).round(decimals=4)
corr_matrix3
xyz4 = np.array([y4,z4,a4])
corr_matrix4 = np.corrcoef(xyz4).round(decimals=4)
corr_matrix4
#plt.suptitle('Amount of Oxygen produced', fontsize = 45,horizontalalignment='center')
mpl.rcParams['xtick.major.size'] = 7
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['ytick.major.size'] = 7
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['xtick.minor.width'] = 2
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['ytick.minor.width'] = 2
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['xtick.bottom'] = True
mpl.rcParams['ytick.right'] = True
#mpl.rcParams['font.size'] = 10
fig, ax = plt.subplots(nrows=1, ncols=4, sharey=True ,sharex=True, figsize = (16,4))
lab = ('stellar\nmass', 'planetary\nmass', 'H2O loss \npercentage')
im = sns.heatmap(corr_matrix1, ax=ax[0],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab,annot_kws={"size": 26})
sns.heatmap(corr_matrix2, ax=ax[1],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab,annot_kws={"size": 26})
sns.heatmap(corr_matrix3, ax=ax[2],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab,annot_kws={"size": 26})
sns.heatmap(corr_matrix4, ax=ax[3],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab,annot_kws={"size": 26})
cbar_ax = fig.add_axes([0.99, 0.19, 0.025, 0.6])
mappable = im.get_children()[0]
fig.subplots_adjust(left = 0.050,bottom=0.07, right=0.95, top=0.91, wspace = 0.05, hspace =-0.52 )
fig.colorbar(mappable, cax=cbar_ax, orientation = 'vertical')#,cbar_pad=0.15)
ax[0].title.set_text('a')#. RG phase 1e9 yr \nstellar')
ax[1].title.set_text('b')#. RG phase 1e9 yr \nstellar + flare')
ax[2].title.set_text('c')#. RG phase PMS \nstellar')
ax[3].title.set_text('d')#. RG phase PMS \nstellar + flare')
#sns.set(font_scale=5.5)
# Save figure
if (sys.argv[1] == 'pdf'):
fig.savefig('Correlation.pdf', bbox_inches="tight")#, dpi=300)
if (sys.argv[1] == 'png'):
fig.savefig('Correlation.png', bbox_inches="tight")#, dpi=300)
"""
This script produces Figure 11 from Amaral+2021: the
Pearson correlation between stellar and planetary mass and
surface water loss percentage.
@author: Laura N. R. do Amaral, Universidad Nacional Autónoma de México, 2021
@email: laura.nevesdoamaral@gmail.com
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels as st
import seaborn as sns
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from collections import OrderedDict
import sys
import os
import subprocess
plt.style.use('ggplot')
try:
import vplot as vpl
except:
print('Cannot import vplot. Please install vplot.')
f1 = './dataframe1.txt'
f2 = './dataframe2.txt'
f3 = './dataframe3.txt'
f4 = './dataframe4.txt'
w1 = np.genfromtxt(f1, usecols=1 ,unpack=True) # water initial
x1 = np.genfromtxt(f1, usecols=3 ,unpack=True) # water final
y1 = np.genfromtxt(f1, usecols=0 ,unpack=True) # stellar mass
z1 = np.genfromtxt(f1, usecols=2 ,unpack=True) # planetary mass
w2 = np.genfromtxt(f2, usecols=1 ,unpack=True) # water initial
x2 = np.genfromtxt(f2, usecols=3 ,unpack=True) # water final
y2 = np.genfromtxt(f2, usecols=0 ,unpack=True) # stellar mass
z2 = np.genfromtxt(f2, usecols=2 ,unpack=True) # planetary mass
w3 = np.genfromtxt(f3, usecols=1 ,unpack=True) # water initial
x3 = np.genfromtxt(f3, usecols=3 ,unpack=True) # water final
y3 = np.genfromtxt(f3, usecols=0 ,unpack=True) # stellar mass
z3 = np.genfromtxt(f3, usecols=2 ,unpack=True) # planetary mass
w4 = np.genfromtxt(f4, usecols=1 ,unpack=True) # water initial
x4 = np.genfromtxt(f4, usecols=3 ,unpack=True) # water final
y4 = np.genfromtxt(f4, usecols=0 ,unpack=True) # stellar mass
z4 = np.genfromtxt(f4, usecols=2 ,unpack=True) # planetary mass
a1 = ((w1-x1)/w1)*100
w1.tolist()
x1.tolist()
y1.tolist()
z1.tolist()
a1.tolist()
a2 = ((w2-x2)/w2)*100
w2.tolist()
x2.tolist()
y2.tolist()
z2.tolist()
a2.tolist()
a3 = ((w3-x3)/w3)*100
w3.tolist()
x3.tolist()
y3.tolist()
z3.tolist()
a3.tolist()
a4 = ((w4-x4)/w4)*100
w4.tolist()
x4.tolist()
y4.tolist()
z4.tolist()
a4.tolist()
dataset1 = {
'W1':w1,
'X1':x1,
'Y1':y1,
'Z1':z1,
'A1':a1
}
dataset2= {
'W2':w2,
'X2':x2,
'Y2':y2,
'Z2':z2,
'A2':a2
}
dataset3 = {
'W3':w3,
'X3':x3,
'Y3':y3,
'Z3':z3,
'A3':a3
}
dataset4 = {
'W4':w4,
'X4':x4,
'Y4':y4,
'Z4':z4,
'A4':a4
}
dataset1 = pd.DataFrame(dataset1)
dataset2 = pd.DataFrame(dataset2)
dataset3 = pd.DataFrame(dataset3)
dataset4 = pd.DataFrame(dataset4)
dataset1.corr()
dataset2.corr()
dataset3.corr()
dataset4.corr()
xyz1 = np.array([y1,z1,a1])
corr_matrix1 = np.corrcoef(xyz1).round(decimals=4)
corr_matrix1
xyz2 = np.array([y2,z2,a2])
corr_matrix2 = np.corrcoef(xyz2).round(decimals=4)
corr_matrix2
xyz3 = np.array([y3,z3,a3])
corr_matrix3 = np.corrcoef(xyz3).round(decimals=4)
corr_matrix3
xyz4 = np.array([y4,z4,a4])
corr_matrix4 = np.corrcoef(xyz4).round(decimals=4)
corr_matrix4
#plt.suptitle('Amount of Oxygen produced', fontsize = 45,horizontalalignment='center')
mpl.rcParams['xtick.major.size'] = 7
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['ytick.major.size'] = 7
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['xtick.minor.width'] = 2
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['ytick.minor.width'] = 2
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['xtick.bottom'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['font.size'] = 30
fig, ax = plt.subplots(nrows=1, ncols=4, sharey=True ,sharex=True, figsize = (16,5.5))
lab = ('stellar\nmass', 'planetary\nmass', 'H2O loss \npercentage')
im1 = sns.heatmap(corr_matrix1, ax=ax[0],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab)
im2 = sns.heatmap(corr_matrix2, ax=ax[1],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab)
im3 = sns.heatmap(corr_matrix3, ax=ax[2],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab)
im4 = sns.heatmap(corr_matrix4, ax=ax[3],cmap='RdYlBu',annot=True,cbar = False,vmin=-1,vmax=1,
xticklabels=lab, yticklabels=lab, annot_kws = {'fontsize' : 30})
cbar_ax = fig.add_axes([0.99, 0.33, 0.03, 0.5])
mappable = im1.get_children()[0]
fig.subplots_adjust(left = 0.050,bottom=0.07, right=0.95, top=0.91, wspace = 0.05, hspace =-0.52 )
fig.colorbar(mappable, cax=cbar_ax, orientation = 'vertical')#,cbar_pad=0.15)
cbar_ax.tick_params(labelsize=22)
ax[0].title.set_text('a')#. RG phase 1e9 yr \nstellar')
ax[1].title.set_text('b')#. RG phase 1e9 yr \nstellar + flare')
ax[2].title.set_text('c')#. RG phase PMS \nstellar')
ax[3].title.set_text('d')#. RG phase PMS \nstellar + flare')
im = [im1,im2,im3,im4]
for i in im:
i.set_xticklabels(i.get_xmajorticklabels(), fontsize = 22, rotation = 90)
    i.set_yticklabels(i.get_ymajorticklabels(), fontsize = 22, rotation = 0)
#im1.set_xticklabels(im1.get_xmajorticklabels(), fontsize = 16)
#im2.set_xticklabels(im2.get_xmajorticklabels(), fontsize = 18)
#im3.set_xticklabels(im3.get_xmajorticklabels(), fontsize = 18)
#im4.set_xticklabels(im4.get_xmajorticklabels(), fontsize = 18)
#im1.set_yticklabels(im1.get_xmajorticklabels(), fontsize = 16)
#im2.set_yticklabels(im2.get_xmajorticklabels(), fontsize = 16)
#im3.set_yticklabels(im3.get_xmajorticklabels(), fontsize = 16)
#im4.set_yticklabels(im4.get_xmajorticklabels(), fontsize = 16)
# Save figure
if (sys.argv[1] == 'pdf'):
fig.savefig('Correlation.pdf', bbox_inches="tight")#, dpi=300)
if (sys.argv[1] == 'png'):
fig.savefig('Correlation.png', bbox_inches="tight")#, dpi=300)
```
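The script above imports `scipy.stats` without using it. One natural use, sketched below, is to attach p-values to the correlation coefficients shown in the heatmaps; this assumes the arrays `y1`, `z1`, and `a1` carry the quantities named by the heatmap tick labels:
```
from scipy.stats import pearsonr

# Pearson r with a two-sided p-value for each pair of quantities in the first dataset
labels = ['stellar mass', 'planetary mass', 'H2O loss percentage']
columns = [y1, z1, a1]
for i in range(len(columns)):
    for j in range(i + 1, len(columns)):
        r, p = pearsonr(columns[i], columns[j])
        print(f'{labels[i]} vs {labels[j]}: r = {r:.4f}, p = {p:.2g}')
```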
## Chapter 2: Training and Evaluating a Decision-Making Model with the Open Bandit Dataset
This example consists of three main steps.
1. Data preprocessing: load the portion of the Open Bandit Dataset collected by the BernoulliTS model and preprocess it.
2. Training the decision-making model: train an IPWLearner-based decision-making model on the training data and let it select actions on the validation data.
3. Evaluating the decision-making model: evaluate the performance of the trained decision-making model on the validation data.
By following this analysis procedure, we aim to answer the question: **in ZOZOTOWN's fashion-item recommendation slots, should we keep using the BernoulliTS model that collected the data, or should we consider switching to the IPWLearner-based decision-making model newly learned via OPL?**
```
# Import the required packages
from pathlib import Path
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import obp
from obp.dataset import OpenBanditDataset
from obp.policy import IPWLearner
from obp.ope import (
OffPolicyEvaluation,
RegressionModel,
InverseProbabilityWeighting as IPS,
DoublyRobust as DR
)
```
## (1) Data Loading and Preprocessing
Download the [Open Bandit Dataset (about 11GB)](https://research.zozo.com/data.html) and place it under "./open_bandit_dataset".
```
# Load the data collected by Bernoulli Thompson Sampling (bts) in ZOZOTOWN's top-page recommendation slot
# Setting `data_path=None` uses a small trial version of the dataset instead
dataset = OpenBanditDataset(
    behavior_policy="bts", # decision-making model used to collect the data
    campaign="men", # campaign: "men", "women", or "all" (note that "all" contains a very large number of records)
    data_path=Path("./open_bandit_dataset"), # path to the dataset
)
# Obtain the data with the default preprocessing applied
# The first 70% of the timestamps become the training data and the last 30% the validation data
training_data, validation_data = dataset.obtain_batch_bandit_feedback(
    test_size=0.3,
    is_timeseries_split=True
)
# Inspect the contents of training_data
training_data.keys()
# number of actions (fashion items)
dataset.n_actions
# number of data points
dataset.n_rounds
# dimension of the features produced by the default preprocessing
dataset.dim_context
# number of positions in the recommendation slot
dataset.len_list
```
### Training the Decision-Making Model
We train a decision-making model that combines IPWLearner with a random forest on the training data, and then let it select actions on the validation data.
```
%%time
# Define an IPWLearner that uses a random forest as its internal classifier
new_decision_making_model = IPWLearner(
    n_actions=dataset.n_actions, # number of actions
    len_list=dataset.len_list, # number of positions in the recommendation slot
    base_classifier=RandomForestClassifier(
        n_estimators=300, max_depth=10, min_samples_leaf=5, random_state=12345
    ),
)
# Train the decision-making model on the training data
new_decision_making_model.fit(
    context=training_data["context"], # features (X_i)
    action=training_data["action"], # actions chosen by the past decision-making model
    reward=training_data["reward"], # observed outcomes
    position=training_data["position"], # position in the recommendation slot where the action was shown
    pscore=training_data["pscore"], # action-choice probabilities of the past decision-making model (propensity scores)
)
# Select actions on the validation data
action_dist = new_decision_making_model.predict(
context=validation_data["context"],
)
```
### Evaluating the Decision-Making Model
We evaluate the performance of the newly trained decision-making model (IPWLearner) on the validation data using the IPW and DR estimators.
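For reference, the two estimators are commonly written as follows (standard definitions; obp's implementation additionally handles recommendation positions and other details):
$$\hat{V}_{\mathrm{IPS}}(\pi; \mathcal{D}) = \frac{1}{n} \sum_{i=1}^{n} \frac{\pi(a_i \mid x_i)}{\pi_b(a_i \mid x_i)}\, r_i, \qquad \hat{V}_{\mathrm{DR}}(\pi; \mathcal{D}) = \frac{1}{n} \sum_{i=1}^{n} \left[ \hat{q}(x_i, \pi) + \frac{\pi(a_i \mid x_i)}{\pi_b(a_i \mid x_i)} \bigl(r_i - \hat{q}(x_i, a_i)\bigr) \right]$$
where $\pi_b$ is the data-collecting (behavior) policy, $\hat{q}$ is the reward model estimated below with `RegressionModel`, and $\hat{q}(x_i, \pi) = \sum_{a} \pi(a \mid x_i)\, \hat{q}(x_i, a)$.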
```
%%time
# Obtain the outcome-prediction (reward) model required by the DR estimator
# We just pass our preferred machine-learning method to the `RegressionModel` implemented in the ope module
regression_model = RegressionModel(
    n_actions=dataset.n_actions, # number of actions
    len_list=dataset.len_list, # number of positions in the recommendation slot
    base_model=LogisticRegression(C=100, max_iter=10000, random_state=12345), # use logistic regression
)
# Estimate the expected rewards on the validation data with the `fit_predict` method
estimated_rewards_by_reg_model = regression_model.fit_predict(
    context=validation_data["context"], # features (X_i)
    action=validation_data["action"], # actions chosen by the past decision-making model
    reward=validation_data["reward"], # observed outcomes
    position=validation_data["position"], # position in the recommendation slot where the action was shown
    random_state=12345,
)
# Define `OffPolicyEvaluation`, which runs the whole performance evaluation in one go
ope = OffPolicyEvaluation(
    bandit_feedback=validation_data, # validation data
    ope_estimators=[IPS(), DR()] # estimators to use
)
# Evaluate the performance of the new IPWLearner-based decision-making model via OPE
ope.visualize_off_policy_estimates(
    action_dist=action_dist, # actions selected by the IPWLearner on the validation data
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
    is_relative=True, # report the improvement relative to the past decision-making model
random_state=12345,
)
```
The performance-evaluation results obtained here suggest that switching from the BernoulliTS model used at data-collection time to personalized recommendation with IPWLearner, which exploits the feature information, could improve the click probability (the performance of the decision-making model) by roughly 30%. The lower bound of the estimated 95% confidence interval for IPWLearner's performance is also around 1.0 (about the same as the baseline BernoulliTS model), so a major failure looks unlikely. Based on these evaluation results, we could deploy IPWLearner directly in the production environment, or proceed safely to an A/B test with confidence that IPWLearner is a promising decision-making model.
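Besides the plot, the point estimates can also be inspected numerically. A small sketch, assuming the `estimate_policy_values` method exposed by recent obp versions:
```
# Numerical point estimates of the new model's performance, one per estimator
estimated_policy_values = ope.estimate_policy_values(
    action_dist=action_dist,
    estimated_rewards_by_reg_model=estimated_rewards_by_reg_model,
)
print(estimated_policy_values)  # e.g. {'ipw': ..., 'dr': ...}
```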
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm.notebook import tqdm
import cv2, os, pickle
import joblib
from multiprocessing import cpu_count
DEBUG = False
IMG_HEIGHT = 300
IMG_WIDTH = 480
VAL_SIZE = int(100) if DEBUG else int(100e3) # 100K validation molecules
CHUNK_SIZE = 40000 # to get ~100MB TFRecords
MAX_INCHI_LEN = 200 # maximum InChI length, to prevent too much padding
if DEBUG:
train = pd.read_csv('/kaggle/input/bms-molecular-translation/train_labels.csv', dtype={ 'image_id': 'string', 'InChI': 'string' }).head(int(1e3))
else:
train = pd.read_csv('/kaggle/input/bms-molecular-translation/train_labels.csv', dtype={ 'image_id': 'string', 'InChI': 'string' })
# Keep only InChI that fit within MAX_INCHI_LEN after tokenization: two slots are reserved for the <start>/<end> tokens, and the 9-character 'InChI=1S/' prefix is removed later (hence the "- 2 + 9" below)
train['InChI_len'] = train['InChI'].apply(len).astype(np.uint16)
train = train.loc[train['InChI_len'] <= MAX_INCHI_LEN - 2 + 9].reset_index(drop=True)
train.head(3)
if DEBUG:
test = pd.read_csv('/kaggle/input/bms-molecular-translation/sample_submission.csv', usecols=['image_id'], dtype={ 'image_id': 'string' }).head(int(1e3))
else:
test = pd.read_csv('/kaggle/input/bms-molecular-translation/sample_submission.csv', usecols=['image_id'], dtype={ 'image_id': 'string' })
test.head(3)
def get_vocabulary():
tokens = ['<start>', '<end>', '<pad>']
vocabulary = set()
for s in tqdm(train['InChI'].values):
vocabulary.update(s)
return tokens + list(vocabulary)
vocabulary = get_vocabulary()
# Save vocabulary mappings
# character -> integer
vocabulary_to_int = dict(zip(vocabulary, np.arange(len(vocabulary), dtype=np.int8)))
with open('vocabulary_to_int.pkl', 'wb') as handle:
pickle.dump(vocabulary_to_int, handle)
# integer -> character
int_to_vocabulary = dict(zip(np.arange(len(vocabulary), dtype=np.int8), vocabulary))
with open('int_to_vocabulary.pkl', 'wb') as handle:
pickle.dump(int_to_vocabulary, handle)
train['InChIClean'] = train['InChI'].apply(lambda InChI: '/'.join(InChI.split('=')[1].split('/')[1:]))
train.head()
# convert the InChI strings to integer lists
# start/end/pad tokens are used
def inchi_str2int(InChI):
res = []
res.append(vocabulary_to_int.get('<start>'))
for c in InChI:
res.append(vocabulary_to_int.get(c))
res.append(vocabulary_to_int.get('<end>'))
while len(res) < MAX_INCHI_LEN:
res.append(vocabulary_to_int.get('<pad>'))
return np.array(res, dtype=np.uint8)
tqdm.pandas() # progress_apply
train['InChI_int'] = train['InChIClean'].progress_apply(inchi_str2int)
train.head()
val = train.iloc[-VAL_SIZE:].reset_index(drop=True)
train = train.iloc[:-VAL_SIZE].reset_index(drop=True)
N_IMGS = len(train)
# plt.figure(figsize=(14, 14))
# img= cv2.imread('../input/bms-molecular-translation/train/d/9/e/d9e032a94a24.png', 0)
# plt.imshow(img)
# plt.show()
def process_img(image_id, folder='train', debug=False):
# read image and invert colors to get black background and white molecule
file_path = f'/kaggle/input/bms-molecular-translation/{folder}/{image_id[0]}/{image_id[1]}/{image_id[2]}/{image_id}.png'
img0 = 255 - cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)
# rotate counter clockwise to get horizontal images
h, w = img0.shape
if h > w:
img0 = np.rot90(img0)
img = cv2.resize(img0,(IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_NEAREST)
if debug:
fig, ax = plt.subplots(1, 2, figsize=(20,10))
ax[0].imshow(img0)
ax[0].set_title('Original image', size=16)
ax[1].imshow(img)
ax[1].set_title('Fully processed image', size=16)
# normalize to range 0-255 and encode as png
img = (img / img.max() * 255).astype(np.uint8)
img = cv2.imencode('.png', img)[1].tobytes()
return img
def split_in_chunks(data):
return [data[i:i + CHUNK_SIZE] for i in range(0, len(data), CHUNK_SIZE)]
train_data_chunks = {
'train': {
'image_id': split_in_chunks(train['image_id'].values),
'InChI': split_in_chunks(train['InChI_int'].values),
},
'val': {
'image_id': split_in_chunks(val['image_id'].values),
'InChI': split_in_chunks(val['InChI_int'].values),
}
}
test_data_chunks = {
'test': {
'image_id': split_in_chunks(test['image_id'].values),
}
}
def make_tfrecords(data_chunks, folder='train'):
    # Create the output folders if they do not already exist
    os.makedirs('./train', exist_ok=True)
    os.makedirs('./val', exist_ok=True)
    os.makedirs('./test', exist_ok=True)
for k, v in data_chunks.items():
for chunk_idx, image_id_chunk in tqdm(enumerate(v['image_id']), total=len(v['image_id'])):
# process images in parallel
jobs = [joblib.delayed(process_img)(fp, folder) for fp in image_id_chunk]
bs = 10
processed_images_chunk = joblib.Parallel(
n_jobs=cpu_count(),
verbose=0,
require='sharedmem',
batch_size=bs,
backend='threading',
)(jobs)
# Create the TFRecords from the processed images
with tf.io.TFRecordWriter(f'./{k}/batch_{chunk_idx}.tfrecords') as file_writer:
if 'InChI' in v.keys(): # TRAIN/VAL, InChI included
for image, InChI in zip(processed_images_chunk, v['InChI'][chunk_idx]):
record_bytes = tf.train.Example(features=tf.train.Features(feature={
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
'InChI': tf.train.Feature(int64_list=tf.train.Int64List(value=InChI)),
})).SerializeToString()
file_writer.write(record_bytes)
else: # TEST, image_id included for submission file
for image, image_id in zip(processed_images_chunk, image_id_chunk):
record_bytes = tf.train.Example(features=tf.train.Features(feature={
'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
'image_id': tf.train.Feature(bytes_list=tf.train.BytesList(value=[str.encode(image_id)])),
})).SerializeToString()
file_writer.write(record_bytes)
make_tfrecords(train_data_chunks)
make_tfrecords(test_data_chunks, 'test')
# convert an int-encoded InChI back to a string
def inchi_int2char(InChI):
res = []
for i in InChI:
c = int_to_vocabulary.get(i)
if c not in ['<start>', '<end>', '<pad>']:
res.append(c)
return ''.join(res)
# Check train TFRecords
def decode_tfrecord(record_bytes):
fea_dict= {
'image': tf.io.FixedLenFeature([], tf.string),
'InChI': tf.io.FixedLenFeature([MAX_INCHI_LEN], tf.int64),}
features = tf.io.parse_single_example(record_bytes, fea_dict)
    image = tf.io.decode_png(features['image'])  # the images were PNG-encoded above, so use the PNG decoder
image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, 1])
image = tf.cast(image, tf.float32) / 255.0
InChI = features['InChI']
InChI = tf.reshape(InChI, [MAX_INCHI_LEN])
return image, InChI
# Check test TFRecords
def decode_test_tfrecord(record_bytes):
features = tf.io.parse_single_example(record_bytes, {
'image': tf.io.FixedLenFeature([], tf.string),
'image_id': tf.io.FixedLenFeature([], tf.string),
})
    image = tf.io.decode_png(features['image'])  # the images were PNG-encoded above, so use the PNG decoder
image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, 1])
image = tf.cast(image, tf.float32) / 255.0
image_id = features['image_id']
return image, image_id
```
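The two decode functions above are defined but never exercised in this notebook. A minimal sketch of how they might be wired into a `tf.data` input pipeline; the file pattern, batch size, and use of `tf.data.AUTOTUNE` (TensorFlow 2.4+) are assumptions:
```
# Build a training dataset from the TFRecords written above
train_files = tf.data.Dataset.list_files('./train/batch_*.tfrecords')
train_ds = (
    tf.data.TFRecordDataset(train_files)
    .map(decode_tfrecord, num_parallel_calls=tf.data.AUTOTUNE)
    .shuffle(1024)
    .batch(32)
    .prefetch(tf.data.AUTOTUNE)
)
# Peek at one batch of (image, InChI) pairs
for images, inchis in train_ds.take(1):
    print(images.shape, inchis.shape)
```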
```
#export
from fastai.basics import *
from fastai.tabular.core import *
from fastai.tabular.model import *
from fastai.tabular.data import *
#hide
from nbdev.showdoc import *
#default_exp tabular.learner
```
# Tabular learner
> The function to immediately get a `Learner` ready to train for tabular data
The main function you probably want to use in this module is `tabular_learner`. It will automatically create a `TabularModel` suitable for your data and infer the right loss function. See the [tabular tutorial](http://docs.fast.ai/tutorial.tabular) for an example of use in context.
## Main functions
```
#export
@log_args(but_as=Learner.__init__)
class TabularLearner(Learner):
"`Learner` for tabular data"
def predict(self, row, n_workers=defaults.cpus):
"Predict on a Pandas Series"
dl = self.dls.test_dl(row.to_frame().T, num_workers=0)
dl.dataset.conts = dl.dataset.conts.astype(np.float32)
inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
b = (*tuplify(inp),*tuplify(dec_preds))
full_dec = self.dls.decode(b)
return full_dec,dec_preds[0],preds[0]
show_doc(TabularLearner, title_level=3)
```
It works exactly like a normal `Learner`; the only difference is that it implements a `predict` method that works on a single row of data.
```
#export
@log_args(to_return=True, but_as=Learner.__init__)
@delegates(Learner.__init__)
def tabular_learner(dls, layers=None, emb_szs=None, config=None, n_out=None, y_range=None, **kwargs):
"Get a `Learner` using `dls`, with `metrics`, including a `TabularModel` created using the remaining params."
if config is None: config = tabular_config()
if layers is None: layers = [200,100]
to = dls.train_ds
emb_szs = get_emb_sz(dls.train_ds, {} if emb_szs is None else emb_szs)
if n_out is None: n_out = get_c(dls)
assert n_out, "`n_out` is not defined, and could not be inferred from data, set `dls.c` or pass `n_out`"
if y_range is None and 'y_range' in config: y_range = config.pop('y_range')
model = TabularModel(emb_szs, len(dls.cont_names), n_out, layers, y_range=y_range, **config)
return TabularLearner(dls, model, **kwargs)
```
If your data was built with fastai, you probably won't need to pass anything to `emb_szs` unless you want to change the library default (produced by `get_emb_sz`); the same goes for `n_out`, which should be inferred automatically. `layers` will default to `[200,100]` and is passed to `TabularModel` along with the `config`.
Use `tabular_config` to create a `config` and customize the model used. `y_range` is exposed directly as an argument because it is needed so often.
All the other arguments are passed to `Learner`.
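As an illustrative sketch only (the argument values and the `learn_custom` name below are arbitrary assumptions, not recommendations), the model can be customized once a `dls` such as the one constructed in the next cell exists:
```
from fastai.metrics import accuracy
learn_custom = tabular_learner(dls, layers=[500, 250],
                               config=tabular_config(embed_p=0.04, use_bn=False),
                               metrics=accuracy)
```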
```
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv')
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
dls = TabularDataLoaders.from_df(df, path, procs=procs, cat_names=cat_names, cont_names=cont_names,
y_names="salary", valid_idx=list(range(800,1000)), bs=64)
learn = tabular_learner(dls)
show_doc(TabularLearner.predict)
```
We can pass an individual row of data to our `TabularLearner`'s `predict` method. Its output is slightly different from that of the other `predict` methods, as this one always returns the decoded input as well:
```
row, clas, probs = learn.predict(df.iloc[0])
row.show()
clas, probs
#hide
#test y_range is passed
learn = tabular_learner(dls, y_range=(0,32))
assert isinstance(learn.model.layers[-1], SigmoidRange)
test_eq(learn.model.layers[-1].low, 0)
test_eq(learn.model.layers[-1].high, 32)
learn = tabular_learner(dls, config = tabular_config(y_range=(0,32)))
assert isinstance(learn.model.layers[-1], SigmoidRange)
test_eq(learn.model.layers[-1].low, 0)
test_eq(learn.model.layers[-1].high, 32)
#export
@typedispatch
def show_results(x:Tabular, y:Tabular, samples, outs, ctxs=None, max_n=10, **kwargs):
df = x.all_cols[:max_n]
for n in x.y_names: df[n+'_pred'] = y[n][:max_n].values
display_df(df)
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
# Supervised Learning with Neural Networks
In this tutorial we show how to optimize a neural network to approximate a given state. As an example, we consider the ground state of the one-dimensional J1-J2 model obtained from exact diagonalization (ED) using NetKet.
The Hamiltonian of the model is given by:
$$ H = \sum_{i=1}^{L} \left( J_{1}\hat{S}_{i} \cdot \hat{S}_{i+1} + J_{2} \hat{S}_{i} \cdot \hat{S}_{i+2} \right) $$
where the sum is over sites of the 1-D chain.
## Outline:
1. Obtain data from ED
2. Choosing the machine (variational ansatz) and the optimizer
3. Defining the Supervised Learning object
4. Running the Supervised Learning
5. Data Visualisation
```
# Import netket library
import netket as nk
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
```
## 1) Obtain data from ED
For a supervised learning problem, we need to provide data consisting of inputs $X$ and output labels $Y$; the neural network is asked to learn the mapping $Y=f(X)$. In our case, the input is a spin basis state and the output is the coefficient of the target wave function on that basis state.
First, we write a simple function that obtains the data, i.e. the ground state, from exact diagonalization. For a detailed explanation see, for example, the J1-J2 model tutorial.
```
def load_ed_data(L, J2=0.4):
# Sigma^z*Sigma^z interactions
sigmaz = np.array([[1, 0], [0, -1]])
mszsz = (np.kron(sigmaz, sigmaz))
# Exchange interactions
exchange = np.asarray(
[[0, 0, 0, 0], [0, 0, 2, 0], [0, 2, 0, 0], [0, 0, 0, 0]])
# Couplings J1 and J2
J = [1., J2]
mats = []
sites = []
for i in range(L):
for d in [0, 1]:
# \sum_i J*sigma^z(i)*sigma^z(i+d)
mats.append((J[d] * mszsz).tolist())
sites.append([i, (i + d + 1) % L])
# \sum_i J*(sigma^x(i)*sigma^x(i+d) + sigma^y(i)*sigma^y(i+d))
mats.append(((-1.)**(d + 1) * J[d] * exchange).tolist())
sites.append([i, (i + d + 1) % L])
# 1D Lattice
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Spin based Hilbert Space
hi = nk.hilbert.Spin(s=0.5, graph=g)
# Custom Hamiltonian operator
ha = nk.operator.LocalOperator(hi)
for mat, site in zip(mats, sites):
ha += nk.operator.LocalOperator(hi, mat, site)
# Perform Lanczos Exact Diagonalization to get lowest three eigenvalues
res = nk.exact.lanczos_ed(ha, first_n=3, compute_eigenvectors=True)
# Eigenvector
ttargets = []
tsamples = []
for i, visible in enumerate(hi.states()):
# only pick zero-magnetization states
mag = np.sum(visible)
if(np.abs(mag) < 1.0e-4):
tsamples.append(visible.tolist())
ttargets.append([np.log(res.eigenvectors[0][i])])
return hi, tsamples, ttargets
```
After we obtain the result as ``res``, we return the Hilbert space ``hi``, the spin basis states ``tsamples``, and the coefficients ``ttargets``.
Notice that we restrict ourselves to the $\sum_i S^z_i = 0$ symmetry sector to simplify the learning.
We now consider a small system with $L=10$ and $J_2 = 0.4$, and obtain the data by calling the function ```load_ed_data```.
```
L = 10
J2 = 0.4
# Load the Hilbert space info and data
hi, training_samples, training_targets = load_ed_data(L, J2)
```
## 2) Choosing the Machine and the Optimizer
For this tutorial, we consider the Restricted Boltzmann Machine ``nk.machine.RbmSpin`` and the AdaDelta optimizer ``nk.optimizer.AdaDelta``.
```
# Machine
ma = nk.machine.RbmSpin(hilbert=hi, alpha=1)
ma.init_random_parameters(seed=1234, sigma=0.01)
# Optimizer
op = nk.optimizer.AdaDelta()
```
## 3) Defining the Supervised Learning object
We now have almost everything (machine, optimizer, data) needed to set up a supervised learning object. We also need to provide the batch size, ``batch_size``, for stochastic gradient descent. For details, see https://en.wikipedia.org/wiki/Stochastic_gradient_descent
```
# Supervised learning object
spvsd = nk.supervised.Supervised(
machine=ma,
optimizer=op,
batch_size=400,
samples=training_samples,
targets=training_targets)
```
## 4) Running the Supervised Learning
The very last piece we need for supervised learning is the loss function.
### Loss function
There are different loss functions one could define for the optimization problem, for example:
$$
\begin{align*}
\mathcal{L}_\text{MSE log} &= \frac{1}{N} \sum_{i}^N |\log\Psi(X_i) - \log\Phi(X_i) |^2\\
\mathcal{L}_\text{Overlap} &=-\log\Big[ \frac{\langle{\Psi|\Phi}\rangle\langle{\Phi|\Psi}\rangle}{\langle{\Psi|\Psi}\rangle\langle{\Phi|\Phi}\rangle} \Big] \\
&=- \log\Big( \sum_{i}^N \Psi^*(X_i)\Phi(X_i) \Big) - \log\Big( \sum_{i}^N \Phi^*(X_i)\Psi(X_i) \Big) \\
&\qquad +
\log\Big( \sum_{i}^N \Psi^*(X_i)\Psi(X_i) \Big) +
\log\Big( \sum_{i}^N \Phi^*(X_i)\Phi(X_i) \Big)
\end{align*}
$$
Here, we consider the latter one, which is the negative log of the overlap, as the loss function.
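To make this loss concrete, here is a small NumPy sketch that evaluates it exactly for full coefficient vectors; this is only feasible for small Hilbert spaces, whereas NetKet estimates it stochastically as described next:
```
import numpy as np

def log_overlap_loss(psi, phi):
    # negative log-fidelity between two (possibly unnormalized) states
    overlap = np.vdot(psi, phi)  # <psi|phi>
    fidelity = (overlap.conjugate() * overlap).real / (
        np.vdot(psi, psi).real * np.vdot(phi, phi).real)
    return -np.log(fidelity)
```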
### Gradient estimate
Taking the derivative of the overlap error function above, we have
$$
\begin{equation*}
\partial_k \mathcal{L}_\text{Overlap} = -\frac{\sum_i O_k^*\Psi^*(X_i)\Phi(X_i) }{\sum_i\Psi^*(X_i)\Phi(X_i)} + \frac{\sum_i O_k^*\Psi^*(X_i)\Psi(X_i)}{\sum_i \Psi^*(X_i)\Psi(X_i)}
\end{equation*}
$$
Note that $N$ is the size of the Hilbert space, so in general this cannot be computed exactly.
We can estimate this gradient by sampling from different distributions,
$$
\begin{equation*}
\hat{\partial_k \mathcal{L}}_\text{Overlap uni} = \frac{\Big\langle O_k^*\Psi^*(X_i)\Psi(X_i)\Big \rangle_{i\sim\text{uni}[1,N]} }{\Big \langle \Psi^*(X_i)\Psi(X_i) \Big \rangle_{i\sim\text{uni}[1,N]}} - \frac{\Big \langle O_k^*\Psi^*(X_i)\Phi(X_i)\Big \rangle_{i\sim\text{uni}[1,N]} }{\Big \langle \Psi^*(X_i)\Phi(X_i) \Big \rangle_{i\sim\text{uni}[1,N]}}
\end{equation*}
$$
$$
\begin{equation*}
\hat{\partial_k \mathcal{L}}_\text{Overlap phi} = \frac{\Big \langle O_k^*(X_i)\frac{\lVert \Psi(X_i)\rVert^2}{\lVert \Phi(X_i)\rVert^2} \Big \rangle_{X_i\sim \lVert \Phi(X_i)\rVert^2 }} {\Big \langle \frac{\lVert \Psi(X_i)\rVert^2}{\lVert \Phi(X_i)\rVert^2} \Big \rangle_{X_i\sim \lVert \Phi(X_i)\rVert^2 }} - \frac{\Big \langle O_k^*(X_i)\frac{ \Psi^*(X_i)}{ \Phi^*(X_i)} \Big \rangle_{X_i\sim \lVert \Phi(X_i)\rVert^2 }}{\Big \langle \frac{ \Psi^*(X_i)}{ \Phi^*(X_i)} \Big \rangle_{X_i\sim \lVert \Phi(X_i)\rVert^2 }}
\end{equation*}
$$
So for the overlap loss function we have two gradient estimators: $\hat{\partial_k \mathcal{L}}_\text{Overlap uni}$, ```Overlap_uni```, and $\hat{\partial_k \mathcal{L}}_\text{Overlap phi}$, ```Overlap_phi```.
We record the loss at every iteration, and save the optimized parameters only every ``save_params_every`` iterations.
```
# Number of iteration
n_iter = 4000
# Run with "Overlap_phi" loss. Also available currently is "MSE, Overlap_uni"
spvsd.run(n_iter=n_iter, loss_function="Overlap_phi",
output_prefix='output', save_params_every=50)
```
## 5) Data Visualisation
We have optimized our machine to approximate the ground state of the J1-J2 model. The loss values are stored in the ".log" file and the optimized parameters in the ".wf" file; both files are in JSON format.
```
# Load the data from the .log file
import json
data=json.load(open("output.log"))
# Extract the relevant information
iters=[]
log_overlap=[]
mse=[]
mse_log=[]
data=json.load(open('output.log'))
for iteration in data["Output"]:
iters.append(iteration["Iteration"])
log_overlap.append(iteration["log_overlap"])
mse.append(iteration["mse"])
mse_log.append(iteration["mse_log"])
overlap = np.exp(-np.array(log_overlap))
```
Now we plot the overlap, i.e. the fidelity, as a function of the iteration number.
```
J2 = 0.4
plt.subplot(2, 1, 1)
plt.title(r'$J_1 J_2$ model, $J_2=' + str(J2) + '$')
plt.ylabel('Overlap = F')
plt.xlabel('Iteration #')
plt.plot(iters, overlap)
plt.axhline(y=1, xmin=0, xmax=iters[-1], linewidth=2, color='k',label='max accuracy = 1')
plt.legend(frameon=False)
plt.subplot(2, 1, 2)
plt.ylabel('Overlap Error = 1-F')
plt.xlabel('Iteration #')
plt.semilogy(iters, 1.-overlap)
plt.show()
```
The result suggests that supervised learning can indeed produce a good approximation to the given state.
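As a simple numerical companion to the plots, one can also report the best fidelity reached during training, using the arrays computed above:
```
best = int(np.argmax(overlap))
print(f"best overlap {overlap[best]:.6f} at iteration {iters[best]}")
```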
```
import sys
sys.path.append('../../pyutils')
import numpy as np
import scipy.linalg
import torch
import metrics
import utils
np.random.seed(12)
```
$$\frac{\partial}{\partial x} ||x||_2^2 = 2x, x \in \mathbb{R}^n$$
```
x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = x@x
ty = torch.dot(tx, tx)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = 2 * x
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
```
$$\frac{\partial}{\partial x} ||x||_1 = sign(x), x \in \mathbb{R}^n$$
```
x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = np.linalg.norm(x, ord=1)
ty = torch.norm(tx, p=1)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = np.sign(x)
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
```
$$\frac{\partial}{\partial x} \sum_{i=1}^n x_i = \mathbb{1}, x \in \mathbb{R}^n$$
```
x = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
y = np.sum(x)
ty = torch.sum(tx)
ty.backward()
print(y)
print(ty.data.numpy())
print(metrics.tdist(y, ty.data.numpy()))
dx = np.ones((x.shape[0]))
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
```
$$x, y \in \mathbb{R}^n$$
$$\frac{\partial x^Ty}{\partial x} = y$$
$$\frac{\partial x^Ty}{\partial y} = x$$
```
x = np.random.randn(14)
y = np.random.randn(14)
tx = torch.tensor(x, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
z = x @ y
tz = torch.dot(tx, ty)
tz.backward()
print(z)
print(tz.data.numpy())
print(metrics.tdist(z, tz.data.numpy()))
dx = y
dx_sol = tx.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
dy = x
dy_sol = ty.grad.data.numpy()
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
```
$$x \in \mathbb{R}^n, \space M \in \mathbb{R}^{n*n} \text{ symmetric}$$
$$\frac{\partial x^TMx}{\partial x} = 2Mx$$
```
x = np.random.randn(3)
M = np.random.randn(3, 3)
M = M.T @ M
tx = torch.tensor(x, requires_grad=True)
tM = torch.tensor(M, requires_grad=True)
z = x @ M @ x
tz = torch.matmul(torch.matmul(tx, tM), tx)
tz.backward()
dx = 2 * M @ x
print(dx)
print(tx.grad.data.numpy())
print(metrics.tdist(dx, tx.grad.data.numpy()))
```
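For completeness, the gradient with respect to $M$ (not stated explicitly above) is the outer product $xx^T$; a quick check against autograd, reusing `x` and `tM` from the cell above:
```
# d(x^T M x)/dM = x x^T (holds for any square M)
dM = np.outer(x, x)
dM_sol = tM.grad.data.numpy()
print(dM)
print(dM_sol)
print(metrics.tdist(dM, dM_sol))
```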
$$z = c * x, \space x \in \mathbb{R^n}, c \in \mathbb{R}$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * c$$
$$\frac{\partial E}{\partial c} = \frac{\partial E}{\partial z}^T x$$
```
x = np.random.randn(14)
c = np.array(2.3)
z = c * x
e = z.T @ z
tx = torch.tensor(x, requires_grad=True)
tc = torch.tensor(c, requires_grad=True)
tz = tc * tx
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
```
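The corresponding gradient checks, following the two formulas above and reusing `x`, `c`, `z`, `tx`, and `tc` from the cell above:
```
# dE/dx = dE/dz * c and dE/dc = dE/dz^T x, with dE/dz = 2z here
dz = 2 * z
dx = dz * c
dc = dz @ x
dx_sol = tx.grad.data.numpy()
dc_sol = tc.grad.data.numpy()
print(metrics.tdist(dx, dx_sol))
print(metrics.tdist(dc, dc_sol))
```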
$$z = x^Ty, \space x, y, z \in \mathbb{R}^n$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * y$$
$$\frac{\partial E}{\partial y} = \frac{\partial E}{\partial z} * x$$
```
x = np.random.randn(14)
y = np.random.randn(14)
z = x @ y
e = z**2
tx = torch.tensor(x, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.dot(tx, ty)
te = tz**2
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dx = dz * y
dy = dz * x
dx_sol = tx.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dx)
print(dx_sol)
print(metrics.tdist(dx, dx_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
```
$$z = Xy, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^m, z \in \mathbb{R}^n$$
$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial z} y^T$$
$$\frac{\partial E}{\partial y} = X^T \frac{\partial E}{\partial z}$$
```
X = np.random.randn(7, 3)
y = np.random.randn(3)
z = X @ y
e = z @ z
tX = torch.tensor(X, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.matmul(tX, ty)
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dX = np.outer(dz, y)
dy = X.T @ dz
dX_sol = tX.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
```
$$z = y^TX, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^n, z \in \mathbb{R}^m$$
$$\frac{\partial E}{\partial X} = y\frac{\partial E}{\partial z}^T$$
$$\frac{\partial E}{\partial y} = X \frac{\partial E}{\partial z}$$
```
X = np.random.randn(7, 3)
y = np.random.randn(7)
z = y @ X
e = z @ z
tX = torch.tensor(X, requires_grad=True)
ty = torch.tensor(y, requires_grad=True)
tz = torch.matmul(ty, tX)
te = torch.dot(tz, tz)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dz = 2 * z
dX = np.outer(y, dz)
dy = X @ dz
dX_sol = tX.grad.data.numpy()
dy_sol = ty.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dy)
print(dy_sol)
print(metrics.tdist(dy, dy_sol))
```
$$Z = XY, \space X \in \mathbb{R}^{n*m}, Y \in \mathbb{R}^{m*p}, Z \in \mathbb{R}^{n*p}$$
$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial Z}Y^T$$
$$\frac{\partial E}{\partial Y} = X^T \frac{\partial E}{\partial Z}$$
```
X = np.random.randn(7, 3)
Y = np.random.randn(3, 2)
Z = X @ Y
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tY = torch.tensor(Y, requires_grad=True)
tZ = torch.matmul(tX, tY)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ @ Y.T
dY = X.T @ dZ
dX_sol = tX.grad.data.numpy()
dY_sol = tY.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dY)
print(dY_sol)
print(metrics.tdist(dY, dY_sol))
```
$$Z = X^TX, \space X \in \mathbb{R}^{n*m}, Z \in \mathbb{R}^{m*m}$$
$$\frac{\partial E}{\partial X} = X(\frac{\partial E}{\partial Z} + \frac{\partial E}{\partial Z}^T)$$
```
X = np.random.randn(5, 3)
Z = X.T @ X
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tZ = torch.matmul(torch.transpose(tX, 1, 0), tX)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = X @ (dZ + dZ.T)
dX_sol = tX.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
```
$Z_I = f(X_I)$, with $Z$ and $X$ tensors of the same size, $f: \mathbb{R} \rightarrow \mathbb{R}$
$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * f'(X_I)$$
```
X = np.random.randn(5, 3)
Z = np.cos(X)
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tZ = torch.cos(tX)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ * (-np.sin(X))
dX_sol = tX.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
```
$Z_I = f(X_I, Y_I)$, with $Z$, $X$, and $Y$ tensors of the same size, $f: \mathbb{R}*\mathbb{R} \rightarrow \mathbb{R}$
$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial X_I}$$
$$\frac{\partial E}{\partial Y_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial Y_I}$$
```
X = np.random.rand(7, 3) + 0.1
Y = np.random.randn(7, 3)
Z = np.power(X, Y)
Z_flat = Z.reshape(-1)
e = Z_flat @ Z_flat
tX = torch.tensor(X, requires_grad=True)
tY = torch.tensor(Y, requires_grad=True)
tZ = torch.pow(tX, tY)
tZ_flat = tZ.view(-1)
te = torch.dot(tZ_flat, tZ_flat)
te.backward()
print(e)
print(te.data.numpy())
print(metrics.tdist(e, te.data.numpy()))
dZ_flat = 2 * Z_flat
dZ = dZ_flat.reshape(Z.shape[0], Z.shape[1])
dX = dZ * Y * np.power(X, Y-1)
dY = dZ * np.log(X) * np.power(X, Y)
dX_sol = tX.grad.data.numpy()
dY_sol = tY.grad.data.numpy()
print(dX)
print(dX_sol)
print(metrics.tdist(dX, dX_sol))
print(dY)
print(dY_sol)
print(metrics.tdist(dY, dY_sol))
```
Every sum of a tensor over a single axis can be transformed into a sum over axis 1 of a 3D tensor, using only reshapes.
$$X \in \mathbb{R}^{m * n * p}, Y \in \mathbb{R}^{m * p}$$
$Y$ is the sum of $X$ over its middle axis (axis 1 in 0-based indexing).
$$Y_{ik} = \sum_{j=1}^n X_{ijk}$$
$$\frac{\partial E}{\partial X_{ijk}} = \frac{\partial E}{\partial Y_{ik}}$$
```
def prod(x):
res = 1
for v in x: res *= v
return res
def sum_axis(X, axis):
shape3 = (prod(X.shape[:axis]), X.shape[axis], prod(X.shape[axis+1:]))
final_shape = X.shape[:axis] + X.shape[axis+1:]
return np.sum(X.reshape(shape3), axis=1).reshape(final_shape)
X = np.random.randn(2, 4, 3, 7)
s = [sum_axis(X, i) for i in range(4)]
tX = torch.tensor(X, requires_grad = True)
s_sol = [torch.sum(tX, i) for i in range(4)]
for i in range(4):
print(s[i].shape)
print(s_sol[i].data.numpy().shape)
print(metrics.tdist(s[i], s_sol[i].data.numpy()))
def my_expand_dims3(x, size):
y = np.empty((x.shape[0], size, x.shape[1]))
for i in range(x.shape[0]):
for j in range(size):
for k in range(x.shape[1]):
y[i, j, k] = x[i, k]
return y
def dsum_axis(X, axis, dout):
dout = dout.reshape((prod(X.shape[:axis]), prod(X.shape[axis+1:])))
return my_expand_dims3(dout, X.shape[axis]).reshape(X.shape)
a = np.array([[1, 2, 3], [4, 5, 6]])
a2 = my_expand_dims3(a, 2)
print(a2)
for i in range(4):
ds = 2 * s[i]
dX = dsum_axis(X, i, ds)
si_flat = s_sol[i].view(-1)
tz = torch.dot(si_flat, si_flat)
tz.backward()
dX_sol = tX.grad.data.numpy()
print(dX.shape)
print(dX_sol.shape)
print(metrics.tdist(dX, dX_sol))
tX.grad.data.zero_()
```
## Derivatives sheet
$$(c)' = 0, \space c \in \mathbb{R}$$
$$(x)' = 1, \space x \in \mathbb{R}$$
$$(cx)' = c, \space c, x \in \mathbb{R}$$
$$(e^x)' = e^x, \space x \in \mathbb{R}$$
$$(ln(x))' = \frac{1}{x}, \space x \in \mathbb{R}$$
$$(\frac{1}{x})' = - \frac{1}{x^2}, \space x \in \mathbb{R}$$
$$(cos(x))' = -sin(x), \space x \in \mathbb{R}$$
$$(sin(x))' = cos(x), \space x \in \mathbb{R}$$
$$(cosh(x))' = sinh(x), \space x \in \mathbb{R}$$
$$(sinh(x))' = cosh(x), \space x \in \mathbb{R}$$
$$(tanh(x))' = 1 - tanh(x)^2, \space x \in \mathbb{R}$$
$$(\sigma(x))' = \sigma(x)(1 - \sigma(x)), \space x \in \mathbb{R}$$
$$\frac{\partial}{\partial x} x^y = y*x^{y-1}, \space x, y \in \mathbb{R}$$
$$\frac{\partial}{\partial y} x^y = ln(x)*x^{y}, \space x, y \in \mathbb{R}$$
$$\frac{\partial}{\partial x} ||x||_2^2 = 2x, x \in \mathbb{R}^n$$
$$\frac{\partial}{\partial x} \sum_{i=1}^n x_i = \mathbb{1}, x \in \mathbb{R}^n$$
$$z = ||x||_1, \space x \in \mathbb{R^n}$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * sgn(x)$$
$$z = c + x, \space x, z \in \mathbb{R^n}, c \in \mathbb{R}$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z}$$
$$\frac{\partial E}{\partial c} = \sum_{i=1}^n \frac{\partial E}{\partial z_i}$$
$$z = c * x, \space x \in \mathbb{R^n}, c \in \mathbb{R}$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * c$$
$$\frac{\partial E}{\partial c} = \frac{\partial E}{\partial z}^T x$$
$$z = c / x, \space x \in \mathbb{R^n}, c \in \mathbb{R}$$
$$\frac{\partial E}{\partial x} = -c * \frac{\partial E}{\partial z} / (x*x)$$
$$\frac{\partial E}{\partial c} = \frac{\partial E}{\partial z}^T \frac{1}{x}$$
$$z = \sum_{i=1}^n x_i, \space x \in \mathbb{R^n}, z \in \mathbb{R}$$
$$\frac{\partial E}{\partial x_i} = \frac{\partial E}{\partial z}$$
$$x, y \in \mathbb{R}^n$$
$$\frac{\partial x^Ty}{\partial x} = y$$
$$\frac{\partial x^Ty}{\partial y} = x$$
$$x \in \mathbb{R}^n, \space M \in \mathbb{R}^{n*n} \text{ symmetric}$$
$$\frac{\partial x^TMx}{\partial x} = 2Mx$$
$$z = x^Ty, \space x, y, z \in \mathbb{R}^n$$
$$\frac{\partial E}{\partial x} = \frac{\partial E}{\partial z} * y$$
$$\frac{\partial E}{\partial y} = \frac{\partial E}{\partial z} * x$$
$$z = Xy, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^m, z \in \mathbb{R}^n$$
$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial z} y^T$$
$$\frac{\partial E}{\partial y} = X^T \frac{\partial E}{\partial z}$$
$$z = y^TX, \space X \in \mathbb{R}^{n*m}, y \in \mathbb{R}^n, z \in \mathbb{R}^m$$
$$\frac{\partial E}{\partial X} = y\frac{\partial E}{\partial z}^T$$
$$\frac{\partial E}{\partial y} = X \frac{\partial E}{\partial z}$$
$$Z = XY, \space X \in \mathbb{R}^{n*m}, Y \in \mathbb{R}^{m*p}, Z \in \mathbb{R}^{n*p}$$
$$\frac{\partial E}{\partial X} = \frac{\partial E}{\partial Z}Y^T$$
$$\frac{\partial E}{\partial Y} = X^T \frac{\partial E}{\partial Z}$$
$$Z = X^TX, \space X \in \mathbb{R}^{n*m}, Z \in \mathbb{R}^{m*m}$$
$$\frac{\partial E}{\partial X} = X(\frac{\partial E}{\partial Z} + \frac{\partial E}{\partial Z}^T)$$
$Z_I = f(X_I)$, with $Z$ and $X$ tensors of the same size, $f: \mathbb{R} \rightarrow \mathbb{R}$
$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * f'(X_I)$$
$Z_I = f(X_I, Y_I)$, with $Z$, $X$, and $Y$ tensors of the same size, $f: \mathbb{R}*\mathbb{R} \rightarrow \mathbb{R}$
$$\frac{\partial E}{\partial X_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial X_I}$$
$$\frac{\partial E}{\partial Y_I} = \frac{\partial E}{\partial Z_I} * \frac{\partial f(X_I, Y_I)}{\partial Y_I}$$
$x \in \mathbb{R}^n$, and $S_i$ = softmax$(x)_i$
$$\frac{\partial S_i}{\partial x_j} = S_i(1 - S_j) \space (i = j)$$
$$\frac{\partial S_i}{\partial x_j} = -S_iS_j \space (i \neq j)$$
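As with the earlier identities, the softmax Jacobian can be checked numerically; a small sketch, assuming the same `np`, `torch`, and `metrics` imports as above:
```
# Softmax Jacobian: J[i, j] = S_i (1 - S_j) if i == j, else -S_i S_j
x = np.random.randn(5)
tx = torch.tensor(x, requires_grad=True)
S = np.exp(x) / np.sum(np.exp(x))
J = np.diag(S) - np.outer(S, S)
tS = torch.softmax(tx, dim=0)
J_sol = np.stack([torch.autograd.grad(tS[i], tx, retain_graph=True)[0].numpy()
                  for i in range(x.shape[0])])
print(metrics.tdist(J, J_sol))
```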
## Generate Spark ML Decision Tree
### Setup SparkSession
```
from pyspark.sql import SparkSession
sparkSession = SparkSession.builder.getOrCreate()
```
### Load Training Dataset from S3 into Spark
```
data = sparkSession.read.format("csv") \
.option("inferSchema", "true").option("header", "true") \
.load("s3a://datapalooza/R/census.csv")
data.head()
```
### Build Spark ML Pipeline with Decision Tree Classifier
```
from pyspark.ml import Pipeline
from pyspark.ml.feature import RFormula
from pyspark.ml.classification import DecisionTreeClassifier
formula = RFormula(formula = "income ~ .")
classifier = DecisionTreeClassifier()
pipeline = Pipeline(stages = [formula, classifier])
pipelineModel = pipeline.fit(data)
print(pipelineModel)
print(pipelineModel.stages[1].toDebugString)
```
## Convert Spark ML Pipeline to PMML
```
from jpmml import toPMMLBytes
pmmlBytes = toPMMLBytes(sparkSession, data, pipelineModel)
print(pmmlBytes.decode("utf-8"))
```
## Deployment Option 1: Mutable Model Deployment
### AWS: Deploy New Model to Live, Running Model Server
```
from urllib import request
update_url = 'http://prediction-pmml-aws.demo.pipeline.io/update-pmml/pmml_census'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = request.Request(update_url, headers=update_headers, data=pmmlBytes)
resp = request.urlopen(req)
print(resp.status) # Should return Http Status 200
```
### GCP: Deploy New Model to Live, Running Model Server
```
from urllib import request
update_url = 'http://prediction-pmml-gcp.demo.pipeline.io/update-pmml/pmml_census'
update_headers = {}
update_headers['Content-type'] = 'application/xml'
req = request.Request(update_url, headers=update_headers, data=pmmlBytes)
resp = request.urlopen(req)
print(resp.status) # Should return Http Status 200
```
## Predict on New Data
### AWS
```
from urllib import request
evaluate_url = 'http://prediction-pmml-aws.demo.pipeline.io/evaluate-pmml/pmml_census'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"age":39,"workclass":"State-gov","education":"Bachelors","education_num":13,"marital_status":"Never-married","occupation":"Adm-clerical","relationship":"Not-in-family","race":"White","sex":"Male","capital_gain":2174,"capital_loss":0,"hours_per_week":40,"native_country":"United-States"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return valid classification with probabilities
```
### GCP
```
from urllib import request
import json
evaluate_url = 'http://prediction-pmml-gcp.demo.pipeline.io/evaluate-pmml/pmml_census'
evaluate_headers = {}
evaluate_headers['Content-type'] = 'application/json'
input_params = '{"age":39,"workclass":"State-gov","education":"Bachelors","education_num":13,"marital_status":"Never-married","occupation":"Adm-clerical","relationship":"Not-in-family","race":"White","sex":"Male","capital_gain":2174,"capital_loss":0,"hours_per_week":40,"native_country":"United-States"}'
encoded_input_params = input_params.encode('utf-8')
req = request.Request(evaluate_url, headers=evaluate_headers, data=encoded_input_params)
resp = request.urlopen(req)
print(resp.read()) # Should return valid classification with probabilities
```
### [View of Prediction Services](http://hystrix.demo.pipeline.io/hystrix-dashboard/monitor/monitor.html?streams=%5B%7B%22name%22%3A%22Predictions%20-%20AWS%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-aws.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%2C%7B%22name%22%3A%22Predictions%20-%20GCP%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-gcp.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%5D)
```
from IPython.display import display, HTML
html = '<iframe width=100% height=500px src="http://hystrix.demo.pipeline.io/hystrix-dashboard/monitor/monitor.html?streams=%5B%7B%22name%22%3A%22Predictions%20-%20AWS%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-aws.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%2C%7B%22name%22%3A%22Predictions%20-%20GCP%22%2C%22stream%22%3A%22http%3A%2F%2Fturbine-gcp.demo.pipeline.io%2Fturbine.stream%22%2C%22auth%22%3A%22%22%2C%22delay%22%3A%22%22%7D%5D">'
display(HTML(html))
```
## Deployment Option 2: Immutable Model Deployment
### Save Model to Disk
```
with open('/root/pipeline/prediction.ml/pmml/data/pmml_census/pmml_census.pmml', 'wb') as f:
f.write(pmmlBytes)
!cat /root/pipeline/prediction.ml/pmml/data/pmml_census/pmml_census.pmml
```
### Commit to Github and Trigger Canary Model Deployment
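One possible way to trigger this from the notebook is to commit the exported PMML file with ordinary git commands; this is only a sketch — the repository path, remote, and commit message are assumptions, not part of the original pipeline description:
```
# Hypothetical: commit the exported model so the CI/CD pipeline can pick it up and start the canary deployment
!cd /root/pipeline && git add prediction.ml/pmml/data/pmml_census/pmml_census.pmml && git commit -m "Update pmml_census model" && git push
```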
### Monitor Canary Model Deployment
```
from IPython.display import display, HTML
html = '<iframe width=100% height=500px src="http://airflow.demo.pipeline.io">'
display(HTML(html))
```
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/search_by_buffer_distance.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/search_by_buffer_distance.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=FeatureCollection/search_by_buffer_distance.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/search_by_buffer_distance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize the Earth Engine API. You only need to authenticate once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
Map.setCenter(-122.45, 37.75, 13)
bart = ee.FeatureCollection('ft:1xCCZkVn8DIkB7i7RVkvsYWxAxsdsQZ6SbD9PCXw')
parks = ee.FeatureCollection('ft:10KC6VfBWMUvNcuxU7mbSEg__F_4UVe9uDkCldBw')
buffered_bart = bart.map(lambda f: f.buffer(2000))
join_filter = ee.Filter.withinDistance(2000, '.geo', None, '.geo')
close_parks = ee.Join.simple().apply(parks, bart, join_filter)
Map.addLayer(buffered_bart, {'color': 'b0b0b0'}, "BART Stations")
Map.addLayer(close_parks, {'color': '008000'}, "Parks")
```
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
## Mixture Density Networks (Gaussian Mixture) for inverted sinusoidal function
### 0. Global seeding
```
from lagom import set_global_seeds
set_global_seeds(seed=0)
```
### 1. Data generation
```
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
class Data(Dataset):
r"""Generate a set of data point of an inverted sinusoidal function.
i.e. y(x) = 7sin(0.75x) + 0.5x + eps, eps~N(0, 1)
Then we ask the neural networks to predict x given y, in __getitem__().
In this case, the classic NN suffers due to only one output given input.
To address it, one can use Mixture Density Networks.
"""
def __init__(self, n):
self.n = n
self.x, self.y = self._generate_data(self.n)
def _generate_data(self, n):
eps = np.random.randn(n)
x = np.random.uniform(low=-10.5, high=10.5, size=n)
y = 7*np.sin(0.75*x) + 0.5*x + eps
return np.float32(x), np.float32(y) # Enforce the dtype to be float32, i.e. FloatTensor in PyTorch
def __len__(self):
return self.n
def __getitem__(self, index):
# Retrieve the x, y value
x = self.x[index]
y = self.y[index]
# Keep array shape due to scalar value
x = np.array([x], dtype=np.float32)
y = np.array([y], dtype=np.float32)
return y, x
import seaborn as sns
sns.set()
data = Data(n=1000)
sns.scatterplot(data.y, data.x, alpha=0.3)
```
### 2. Make MDN network
```
import torch.nn as nn
from lagom.core.networks import make_fc
from lagom.core.networks import ortho_init
from lagom.core.networks import BaseMDN
class MDN(BaseMDN):
def make_feature_layers(self, config):
out = make_fc(input_dim=1, hidden_sizes=[15, 15])
last_dim = 15
return out, last_dim
def make_mdn_heads(self, config, last_dim):
out = {}
num_density = 20
data_dim = 1
out['unnormalized_pi_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['mu_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['logvar_head'] = nn.Linear(in_features=last_dim, out_features=num_density*data_dim)
out['num_density'] = num_density
out['data_dim'] = data_dim
return out
def init_params(self, config):
for layer in self.feature_layers:
ortho_init(layer, nonlinearity='tanh', constant_bias=0.0)
ortho_init(self.unnormalized_pi_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
ortho_init(self.mu_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
ortho_init(self.logvar_head, nonlinearity=None, weight_scale=0.01, constant_bias=0.0)
def feature_forward(self, x):
for layer in self.feature_layers:
x = torch.tanh(layer(x))
return x
```
### 3. Training
```
import torch.optim as optim
D = Data(n=2500)
train_loader = DataLoader(D, batch_size=64)
device = torch.device('cuda')
model = MDN()
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=5e-3)
for i in range(1000):
model.train()
losses = []
for data, target in train_loader:
data = data.to(device)
target = target.to(device)
optimizer.zero_grad()
log_pi, mu, std = model(data)
loss = model.MDN_loss(log_pi=log_pi,
mu=mu,
std=std,
target=target)
loss.backward()
losses.append(loss.item())
optimizer.step()
if i == 0 or (i+1)%100 == 0:
#IPython.display.clear_output(wait=True)
print(f'Epoch: {i+1}\t Loss: {np.mean(losses)}')
```
### 4. Evaluation
```
import matplotlib.pyplot as plt
sns.set()
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6*3, 4*2))
# Temperature: controls the uncertainty.
# Larger temperature leads to larger uncertainty.
list_tau = [0.1, 0.5, 0.8, 1.0, 1.5, 2.0]
test_data = torch.tensor(np.linspace(-15, 15, num=1000, dtype=np.float32), device=device).unsqueeze(1)
with torch.no_grad(): # disable gradient computation, save memory
model.eval() # evaluation mode
log_pi, mu, std = model(test_data.to(device))
for tau, ax in zip(list_tau, axes.reshape(-1)):
samples = model.sample(log_pi=log_pi, mu=mu, std=std, tau=tau, _fast_code=True)
ax.scatter(D.y, D.x, alpha=0.2)
ax.scatter(test_data.detach().cpu().numpy(), samples.detach().cpu().numpy(), alpha=0.2, color='red')
offset = 2
ax.set_xlim(-15 - offset, 15 + offset)
ax.set_ylim(-10 - offset, 10 + offset)
ax.set_title(r'$\tau$: ' + str(tau))
#fig.savefig('samples.png')
#fig
```
```
%matplotlib inline
import os
import random
import cPickle as pickle
import numpy as np
import matplotlib.pyplot
from matplotlib.pyplot import imshow
import keras
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
from keras.models import Model
from sklearn.decomposition import PCA
from scipy.spatial import distance
from tqdm import tqdm
model = keras.applications.VGG16(weights='imagenet', include_top=True)
model.summary()
# get_image will return a handle to the image itself, and a numpy array of its pixels to input the network
def get_image(path):
img = image.load_img(path, target_size=model.input_shape[1:3])
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return img, x
img, x = get_image("../images/scaled/0a219a4771fe880388022ef07092de91.jpg")
predictions = model.predict(x)
imshow(img)
for pred in decode_predictions(predictions)[0]:
print("predicted %s with probability %0.3f" % (pred[1], pred[2]))
feat_extractor = Model(inputs=model.input, outputs=model.get_layer("fc2").output)
feat_extractor.summary()
img, x = get_image("../images/scaled/0a219a4771fe880388022ef07092de91.jpg")
feat = feat_extractor.predict(x)
type(feat[0])
matplotlib.pyplot.figure(figsize=(16,4))
matplotlib.pyplot.plot(feat[0])
matplotlib.pyplot.show()
feat[0]
images_path = '../images/scaled'
max_num_images = 10000
images = [os.path.join(dp, f) for dp, dn, filenames in os.walk(images_path) for f in filenames if os.path.splitext(f)[1].lower() in ['.jpg','.png','.jpeg']]
if max_num_images < len(images):
images = [images[i] for i in sorted(random.sample(xrange(len(images)), max_num_images))]
print("keeping %d images to analyze" % len(images))
features = []
for image_path in tqdm(images):
img, x = get_image(image_path);
feat = feat_extractor.predict(x)[0]
features.append(feat)
features = np.array(features)
pca = PCA(n_components=300)
pca.fit(features)
pca_features = pca.transform(features)
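# pca_features now holds a 300-dimensional compact representation of each image's 4096-d fc2 activations,
# which makes the pairwise distance computations below much cheaper than using the raw features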
def get_closest_images(query_image_idx, num_results=5):
distances = [ distance.euclidean(pca_features[query_image_idx], feat) for feat in pca_features ]
idx_closest = sorted(range(len(distances)), key=lambda k: distances[k])[1:num_results+1]
return idx_closest
def get_concatenated_images(indexes, thumb_height):
thumbs = []
for idx in indexes:
img = image.load_img(images[idx])
img = img.resize((int(img.width * thumb_height / img.height), thumb_height))
thumbs.append(img)
concat_image = np.concatenate([np.asarray(t) for t in thumbs], axis=1)
return concat_image
# do a query on a random image
query_image_idx = int(len(images) * random.random())
idx_closest = get_closest_images(query_image_idx)
query_image = get_concatenated_images([query_image_idx], 100)
results_image = get_concatenated_images(idx_closest, 80)
# display the query image
matplotlib.pyplot.figure(figsize = (4,4))
imshow(query_image)
matplotlib.pyplot.title("query image (%d)" % query_image_idx)
# display the resulting images
matplotlib.pyplot.figure(figsize = (8,6))
imshow(results_image)
matplotlib.pyplot.title("result images")
pickle.dump([images, pca_features], open('../output-ml4a/features_pca_base.p', 'wb'))
features = np.array(features)
pca2 = PCA(n_components=3)
pca2.fit(features)
pca_features2 = pca2.transform(features)
def get_image_path_between(query_image_idx_1, query_image_idx_2, num_hops=4):
path = [query_image_idx_1, query_image_idx_2]
for hop in range(num_hops-1):
t = float(hop+1) / num_hops
lerp_acts = t * pca_features2[query_image_idx_1] + (1.0-t) * pca_features2[query_image_idx_2]
distances = [distance.euclidean(lerp_acts, feat) for feat in pca_features2]
idx_closest = sorted(range(len(distances)), key=lambda k: distances[k])
path.insert(1, [i for i in idx_closest if i not in path][0])
return path
# pick image and number of hops
num_hops = 6
query_image_idx_1 = int(len(images) * random.random())
query_image_idx_2 = int(len(images) * random.random())
# get path
path = get_image_path_between(query_image_idx_1, query_image_idx_2, num_hops)
# draw image
path_image = get_concatenated_images(path, 200)
matplotlib.pyplot.figure(figsize = (16,12))
imshow(path_image)
matplotlib.pyplot.title("result images")
```
<a href="https://colab.research.google.com/github/AWH-GlobalPotential-X/AWH-Geo/blob/master/notebooks/Population_noSMDW.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This tool calculates the world population without safely managed drinking water (SMDW), based on official estimates from the WHO/UNICEF Joint Monitoring Programme (JMP).
This tool requires a [Google Drive](https://drive.google.com/drive/my-drive) and [Earth Engine Account](https://developers.google.com/earth-engine/)
Click "Connect" at the top right of this notebook.
Then run each of the code blocks below, following instructions.
```
#@title Basic setup and earthengine access.
print('Welcome to the Population without SMDW tool')
# import, authenticate, then initialize EarthEngine module ee
# https://developers.google.com/earth-engine/python_install#package-import
import ee
print('Make sure the EE version is v0.1.215 or greater...')
print('Current EE version = v' + ee.__version__)
print('')
ee.Authenticate()
ee.Initialize()
worldGeo = ee.Geometry.Polygon( # Created for some masking and geo calcs
coords=[[-180,-90],[-180,0],[-180,90],[-30,90],[90,90],[180,90],
[180,0],[180,-90],[30,-90],[-90,-90],[-180,-90]],
geodesic=False,
proj='EPSG:4326'
)
#@title Click to run calculation
"""
This script creates an image representing the best-estimate distribution of the
world's 2.2 billion people without SMDW. It applies a weight to a high-resolution
population image based on the % proportion of people without SMDW from JMP
surveys, created in the JMP_GeoProcessor (on FigShare) and exported as the
JMP GeoFabric.
Results are created using population data from WorldPop 2017 top-down residential
population counts (1km res).
First, the JMP GeoFabric is converted into an image, then weighted across the pop image.
"""
jmpGeofabric = ee.FeatureCollection('users/awhgeoglobal/jmpGeofabric')
# JMP GeoFabric converted to image
jmpGeofabric_image = jmpGeofabric.reduceToImage(
properties=['WTRST_PER'],
reducer=ee.Reducer.first()
).rename('WTRST_PER')
# population calcs
# World Pop version
worldPop_2017 = ee.Image('users/awhgeoglobal/ppp_2017_1km_Aggregated')
worldPop_2017_scale = worldPop_2017.projection().nominalScale()
# calculate water starved population (100 - jmp_safelyManaged) * population of grid cell
pop_noSMDW = worldPop_2017.multiply(jmpGeofabric_image.select(['WTRST_PER']
).divide(100)).int().rename('pop_noSMDW')
# log of above
popLog_noSMDW = pop_noSMDW.add(1).log10()
# Export pop no SMDW WorldPop (default)
task1 = ee.batch.Export.image.toDrive(
image=ee.Image(pop_noSMDW),
region=worldGeo,
scale=worldPop_2017_scale.getInfo(),
crs='EPSG:4326',
maxPixels=1e12,
# folder='', # write Drive folder here if desired
description='pop_noSMDW',
fileNamePrefix='pop_noSMDW',
fileFormat='GeoTIFF'
)
task1.start()
# Export log of pop no SMDW
task2 = ee.batch.Export.image.toDrive(
image=ee.Image(popLog_noSMDW),
region=worldGeo,
scale=worldPop_2017_scale.getInfo(),
crs='EPSG:4326',
maxPixels=1e12,
# folder='', # write Drive folder here if desired
description='popLog_noSMDW',
fileNamePrefix='popLog_noSMDW',
fileFormat='GeoTIFF'
)
task2.start()
```
Go to https://code.earthengine.google.com/ to track task progress.
This will save as GeoTIFF named "pop_noSMDW" and "popLog_noSMDW" in your root [Google Drive folder](https://drive.google.com/drive/my-drive).
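Alternatively, the export tasks can be polled directly from this notebook; a minimal sketch, using the `task1` and `task2` objects created above:
```
# Poll the state of the export tasks; task.status() returns a dict whose 'state'
# field is e.g. 'READY', 'RUNNING', 'COMPLETED', or 'FAILED'.
for task in [task1, task2]:
    print(task.status()['state'])
```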
# Simulations of _E. coli_
This notebook contains simple tests of the E. coli model `iML1515` both in regular and ecModel format.
Benjamín J. Sánchez, 2019-10-15
## 1. Loading models
* Metabolic model: https://github.com/SysBioChalmers/ecModels/blob/chore/updateiML1515/eciML1515/model/iML1515.xml
* ecModel: https://github.com/SysBioChalmers/ecModels/blob/chore/updateiML1515/eciML1515/model/eciML1515.xml
(temporal, eventually they will be available in the master branch)
```
import cobra
import os
import wget
# Metabolic model:
wget.download("https://raw.githubusercontent.com/SysBioChalmers/ecModels/chore/updateiML1515/eciML1515/model/iML1515.xml", "iML1515.xml", bar=False)
model = cobra.io.read_sbml_model("iML1515.xml")
os.remove("iML1515.xml")
# Enzyme-constrained model:
if os.path.exists("eciML1515.xml"):
    os.remove("eciML1515.xml")  # remove any stale local copy before downloading
wget.download("https://raw.githubusercontent.com/SysBioChalmers/ecModels/chore/updateiML1515/eciML1515/model/eciML1515.xml", "eciML1515.xml", bar=False)
ecModel = cobra.io.read_sbml_model("eciML1515.xml")
os.remove("eciML1515.xml")
```
## 2. Simulating models
### 2.1. Simulating the metabolic model
```
model.objective.expression.args[0]
model.solver.timeout = 30
solution = model.optimize()
solution
model.summary()
for reaction in model.reactions:
if len(reaction.metabolites) == 1 and solution.fluxes[reaction.id] < 0:
print(reaction.id + ": " + str(solution.fluxes[reaction.id]))
```
Note that in total 16 metabolites are consumed: oxygen, glucose, ammonia, phosphate, sulphate and 11 minerals.
### 2.2. Simulating the enzyme-constrained model _as-is_
```
ecModel.objective.expression.args[0]
ecModel.solver.timeout = 30
ecModel.optimize()
```
### 2.3 Fixing the enzyme-constrained model
cobrapy cannot handle upper bounds = `Inf`, therefore we need to replace them with `1000` (standard in the field):
```
import math
for reaction in ecModel.reactions:
if math.isinf(reaction.upper_bound):
reaction.upper_bound = 1000
ecModel.optimize()
```
Pretty high objective function -> we need to lower the upper bound of glucose uptake:
```
ecModel.reactions.EX_glc__D_e_REV.upper_bound = 10
ecModel.optimize()
```
Nothing changed -> let's look at the summary of in/out fluxes:
```
ecModel.summary()
```
We see that many uptake fluxes are fully unconstrained. Let's fix all of them to zero except for the original 16:
```
for reaction in ecModel.reactions:
if len(reaction.metabolites) == 1 and reaction.name.endswith(" (reversible)"):
reaction.lower_bound = 0
reaction.upper_bound = 0
ecModel.reactions.EX_glc__D_e_REV.upper_bound = 10 #glucose will be limiting
ecModel.reactions.EX_pi_e_REV.upper_bound = 1000
ecModel.reactions.EX_mn2_e_REV.upper_bound = 1000
ecModel.reactions.EX_fe2_e_REV.upper_bound = 1000
ecModel.reactions.EX_zn2_e_REV.upper_bound = 1000
ecModel.reactions.EX_mg2_e_REV.upper_bound = 1000
ecModel.reactions.EX_ca2_e_REV.upper_bound = 1000
ecModel.reactions.EX_ni2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cu2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cobalt2_e_REV.upper_bound = 1000
ecModel.reactions.EX_nh4_e_REV.upper_bound = 1000
ecModel.reactions.EX_mobd_e_REV.upper_bound = 1000
ecModel.reactions.EX_so4_e_REV.upper_bound = 1000
ecModel.reactions.EX_k_e_REV.upper_bound = 1000
ecModel.reactions.EX_o2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cl_e_REV.upper_bound = 1000
ecModel.optimize()
```
Success!!
## 3. Correcting model fields:
```
# Metabolite ids: remove the trailing "[comp]" (if any):
for metabolite in ecModel.metabolites:
trail = "[" + metabolite.compartment + "]"
if metabolite.id.endswith(trail):
metabolite.id = metabolite.id.split(trail)[0]
```
## 4. Model export
```
#Save model as .xml & .json:
cobra.io.write_sbml_model(ecModel, "eciML1515.xml")
cobra.io.save_json_model(ecModel, "eciML1515.json")
```
## 5. Changing the GAM (growth-associated maintenance)
```
# Increase the growth-associated maintenance by 10: consume 10 more ATP + H2O
# and produce 10 more ADP + H + Pi in the biomass reaction.
print(model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M'))
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.atp_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.h2o_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.adp_c: 10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.h_c: 10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.pi_c: 10.0})
print(model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M'))
```
|
github_jupyter
|
import cobra
import os
import wget
# Metabolic model:
wget.download("https://raw.githubusercontent.com/SysBioChalmers/ecModels/chore/updateiML1515/eciML1515/model/iML1515.xml", "iML1515.xml", bar=False)
model = cobra.io.read_sbml_model("iML1515.xml")
os.remove("iML1515.xml")
# Enzyme-constrained model:
os.remove("eciML1515.xml")
wget.download("https://raw.githubusercontent.com/SysBioChalmers/ecModels/chore/updateiML1515/eciML1515/model/eciML1515.xml", "eciML1515.xml", bar=False)
ecModel = cobra.io.read_sbml_model("eciML1515.xml")
os.remove("eciML1515.xml")
model.objective.expression.args[0]
model.solver.timeout = 30
solution = model.optimize()
solution
model.summary()
for reaction in model.reactions:
if len(reaction.metabolites) == 1 and solution.fluxes[reaction.id] < 0:
print(reaction.id + ": " + str(solution.fluxes[reaction.id]))
ecModel.objective.expression.args[0]
ecModel.solver.timeout = 30
ecModel.optimize()
import math
for reaction in ecModel.reactions:
if math.isinf(reaction.upper_bound):
reaction.upper_bound = 1000
ecModel.optimize()
ecModel.reactions.EX_glc__D_e_REV.upper_bound = 10
ecModel.optimize()
ecModel.summary()
for reaction in ecModel.reactions:
if len(reaction.metabolites) == 1 and reaction.name.endswith(" (reversible)"):
reaction.lower_bound = 0
reaction.upper_bound = 0
ecModel.reactions.EX_glc__D_e_REV.upper_bound = 10 #glucose will be limiting
ecModel.reactions.EX_pi_e_REV.upper_bound = 1000
ecModel.reactions.EX_mn2_e_REV.upper_bound = 1000
ecModel.reactions.EX_fe2_e_REV.upper_bound = 1000
ecModel.reactions.EX_zn2_e_REV.upper_bound = 1000
ecModel.reactions.EX_mg2_e_REV.upper_bound = 1000
ecModel.reactions.EX_ca2_e_REV.upper_bound = 1000
ecModel.reactions.EX_ni2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cu2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cobalt2_e_REV.upper_bound = 1000
ecModel.reactions.EX_nh4_e_REV.upper_bound = 1000
ecModel.reactions.EX_mobd_e_REV.upper_bound = 1000
ecModel.reactions.EX_so4_e_REV.upper_bound = 1000
ecModel.reactions.EX_k_e_REV.upper_bound = 1000
ecModel.reactions.EX_o2_e_REV.upper_bound = 1000
ecModel.reactions.EX_cl_e_REV.upper_bound = 1000
ecModel.optimize()
# Metabolite ids: remove the trailing "[comp]" (if any):
for metabolite in ecModel.metabolites:
trail = "[" + metabolite.compartment + "]"
if metabolite.id.endswith(trail):
metabolite.id = metabolite.id.split(trail)[0]
#Save model as .xml & .json:
cobra.io.write_sbml_model(ecModel, "eciML1515.xml")
cobra.io.save_json_model(ecModel, "eciML1515.json")
print(model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M'))
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.atp_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.h2o_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.adp_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.h_c: -10.0})
model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M').add_metabolites({model.metabolites.pi_c: -10.0})
print(model.reactions.get_by_id('BIOMASS_Ec_iML1515_core_75p37M'))
| 0.347316 | 0.792544 |
<a href="https://colab.research.google.com/github/Arthurads-rj/machine-learning-portf-lio/blob/main/ML1_Notas_de_Alunos.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Using the Linear Regression algorithm
In this project, the linear regression algorithm was used to predict the **final grades** of students at a school.
This algorithm was chosen because of the situation and its ease of use. Since I was working only with integer values, it is the most suitable choice.
```
import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
import matplotlib.pyplot as pyplot
from google.colab import drive
drive.mount('/content/drive')
```
After importing the required libraries, I used the describe() command to check whether there were missing values in the table, along with other interesting details that might turn out to be useful for the data analysis.
```
dados = pd.read_csv('../content/drive/MyDrive/Planilhas/student-mat.csv')
dados.describe()
```
And here, I used the head() command to view the first five rows of the table and get a sense of the data.
Keep in mind that G1, G2 and G3 are, respectively, Grade 1, Grade 2 and Grade 3 (the final grade).
```
dados.head()
```
Here is all the code used. I selected features I thought would be relevant for the model to predict from, such as study time, the first and second grades, failures, free time and absences.
In the end, the prediction accuracy averaged between 77% and 87%.
20% of the table's data were held out to test the predictions (the remaining 80% were used to train the model), and the results can be rounded.
In the 'Notas' (grades) part of the output, read it as:
Notas: [G1], [G2], [study time], [failures], [absences], [free time]
```
dados_features = ['G1', 'G2', 'studytime', 'failures', 'absences', 'freetime']
prever = 'G3'
# features used
X = np.array(dados[dados_features])
y = np.array(dados[prever])
x_treino, x_teste, y_treino, y_teste = sklearn.model_selection.train_test_split(X, y, test_size=0.2)
linear = linear_model.LinearRegression()
linear.fit(x_treino, y_treino)
prec = linear.score(x_teste, y_teste)
print(f'Precisão: {prec:.0%}')
previsoes = linear.predict(x_teste)
for x in range(len(previsoes)):
print(f'Previsão: {previsoes[x]:.2f}, Notas: {x_teste[x]}, Resultado final: {y_teste[x]}')
print()
```
# Plots
Below are plots showing the correlation of study time, free time, absences and failures with the final grade.
```
pyplot.scatter(dados['G1'], dados['G3'])
pyplot.xlabel('Primeira Nota')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['G2'], dados['G3'])
pyplot.xlabel('Segunda Nota')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['studytime'], dados['G3'])
pyplot.xlabel('Tempo de estudo')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['failures'], dados['G3'])
pyplot.xlabel('Falhas')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['absences'], dados['G3'])
pyplot.xlabel('Faltas')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['freetime'], dados['G3'])
pyplot.xlabel('Tempo Livre')
pyplot.ylabel('Nota final')
pyplot.show()
```
We saw that:
* Students with the most study time tend to achieve the maximum grade.
* Students with a large number of failures would not achieve a good final grade.
* Students with an average of two (2) hours of free time would perform well.
* Students with few absences would perform better than those with many absences.
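As a quick numerical complement to these observations, the sketch below computes the correlation of each feature with the final grade directly from the `dados` DataFrame (the specific correlation values are not claimed here):
```
# Pearson correlation of each selected feature with the final grade G3
cols = ['G1', 'G2', 'studytime', 'failures', 'absences', 'freetime', 'G3']
print(dados[cols].corr()['G3'].sort_values(ascending=False))
```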
|
github_jupyter
|
import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
import matplotlib.pyplot as pyplot
from google.colab import drive
drive.mount('/content/drive')
dados = pd.read_csv('../content/drive/MyDrive/Planilhas/student-mat.csv')
dados.describe()
dados.head()
dados_features = ['G1', 'G2', 'studytime', 'failures', 'absences', 'freetime']
prever = 'G3'
#features usados
X = np.array(dados[dados_features])
y = np.array(dados[prever])
x_treino, x_teste, y_treino, y_teste = sklearn.model_selection.train_test_split(X, y, test_size=0.2)
linear = linear_model.LinearRegression()
linear.fit(x_treino, y_treino)
prec = linear.score(x_teste, y_teste)
print(f'Precisão: {prec:.0%}')
previsoes = linear.predict(x_teste)
for x in range(len(previsoes)):
print(f'Previsão: {previsoes[x]:.2f}, Notas: {x_teste[x]}, Resultado final: {y_teste[x]}')
print()
pyplot.scatter(dados['G1'], dados['G3'])
pyplot.xlabel('Primeira Nota')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['G2'], dados['G3'])
pyplot.xlabel('Segunda Nota')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['studytime'], dados['G3'])
pyplot.xlabel('Tempo de estudo')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['failures'], dados['G3'])
pyplot.xlabel('Falhas')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['absences'], dados['G3'])
pyplot.xlabel('Faltas')
pyplot.ylabel('Nota final')
pyplot.show()
pyplot.scatter(dados['freetime'], dados['G3'])
pyplot.xlabel('Tempo Livre')
pyplot.ylabel('Nota final')
pyplot.show()
| 0.402157 | 0.962813 |
# Model quantization
If you plan on using this implementation, please cite our work (https://www.sciencedirect.com/science/article/pii/S0141933119302844):
@article{Nalepa2020MaM,
author = {Jakub Nalepa and Marek Antoniak and
Michal Myller and Pablo {Ribalta Lorenzo} and Michal Marcinkiewicz},
title = {Towards resource-frugal deep convolutional neural
networks for hyperspectral image segmentation},
journal = {Microprocessors and Microsystems},
volume = {73},
pages = {102994},
year = {2020},
issn = {0141-9331},
doi = {https://doi.org/10.1016/j.micpro.2020.102994},
url = {https://www.sciencedirect.com/science/article/pii/S0141933119302844}}
To perform model quantization, we use the Xilinx DNNDK tool (https://www.xilinx.com/support/documentation/sw_manuals/ai_inference/v1_6/ug1327-dnndk-user-guide.pdf).
```
import os
import sys
sys.path.append(os.path.dirname(os.getcwd()))
import os
import subprocess
import tensorflow as tf
from scripts.quantization import evaluate_graph, freeze_model
from scripts import prepare_data, artifacts_reporter, train_model, evaluate_model
from ml_intuition.data.utils import plot_training_curve, show_statistics
DEST_PATH = 'xillinx_model_compilation_results'
DATA_FILE_PATH = os.path.join(os.path.dirname(os.getcwd()), 'datasets/pavia/pavia.npy')
GT_FILE_PAT = os.path.join(os.path.dirname(os.getcwd()), 'datasets/pavia/pavia_gt.npy')
experiment_dest_path = os.path.join(DEST_PATH, 'experiment_0')
data_path = os.path.join(experiment_dest_path, 'data.h5')
os.makedirs(experiment_dest_path, exist_ok=True)
```
# Prepare the data
To fit into the pipeline, the data has to be preprocessed. This is achieved by the `prepare_data.main` function. It accepts a path to a `.npy` file with the original cube as well as the corresponding ground truth. In this example, we randomly extract 250 samples from each class (balanced scenario), use 10% of them as the validation set, and extract only the spectral information of each pixel. The returned object is a dictionary with three keys: `train`, `test` and `val`. Each of them contains an additional dictionary with `data` and `labels` keys, holding corresponding `numpy.ndarray` objects with the data. For more details about the parameters, refer to the documentation of the `prepare_data.main` function (located in `scripts/prepare_data`).
```
prepare_data.main(data_file_path=DATA_FILE_PATH,
ground_truth_path=GT_FILE_PAT,
output_path=data_path,
train_size=250,
val_size=0.1,
stratified=True,
background_label=0,
channels_idx=2,
neighborhood_size=None,
save_data=True,
seed=0)
```
# Train the model
The function `train_model.train` executes the training procedure. The trained model will be stored under the `experiment_dest_path` folder.
```
train_model.train(model_name='model_2d',
kernel_size=5,
n_kernels=200,
n_layers=1,
dest_path=experiment_dest_path,
data=data_path,
sample_size=103,
n_classes=9,
lr=0.001,
batch_size=128,
epochs=200,
verbose=0,
shuffle=True,
patience=15,
noise=[],
noise_sets=[])
plot_training_curve(os.path.join(experiment_dest_path, "training_metrics.csv"), ['val_loss', 'val_acc', 'loss', 'acc'])
```
# Evaluate full precision model
Evaluate the performance of the full-precision model so that it can later be compared with the quantized one.
```
evaluate_model.evaluate(
model_path=os.path.join(experiment_dest_path, 'model_2d'),
data=data_path,
dest_path=experiment_dest_path,
n_classes=9,
batch_size=1024,
noise=[],
noise_sets=[])
tf.keras.backend.clear_session()
show_statistics(os.path.join(experiment_dest_path, "inference_metrics.csv"))
```
# Freeze model
Freeze the tensorflow model into the `.pb` format.
```
freeze_model.main(model_path=os.path.join(experiment_dest_path, 'model_2d'),
output_dir=experiment_dest_path)
```
# Quantize the model
Perform the quantization by running the `quantize.sh` bash script with the appropriate parameters. It executes the `decent_q` command from the Xilinx DNNDK library. The output is the `quantize_eval_model.pb` file and a `deploy_model.pb` file, which should be used for compilation for a specific DPU.
```
node_names_file = os.path.join(experiment_dest_path, 'freeze_input_output_node_name.json')
frozen_graph_path = os.path.join(experiment_dest_path, 'frozen_graph.pb')
cmd = '../scripts/quantize.sh ' + node_names_file + ' ' \
+ frozen_graph_path + ' ' + data_path + ' ' + \
'?,103,1,1' + ' ' + \
'ml_intuition.data.input_fn.calibrate_2d_input' + ' ' + \
'128' + ' ' + experiment_dest_path + \
' ' + str(0)
f = open(os.path.join(experiment_dest_path, 'call_output.txt'),'w')
env = os.environ.copy()
env['PYTHONPATH'] = os.path.dirname(os.getcwd())
subprocess.call(cmd, shell=True, env=env, stderr=f)
f.close()
```
# Evaluate the quantized model (graph)
Evaluate the performance of the quantized model to check whether there was any loss in performance. Results for the graph are stored in `inference_graph_metrics.csv`.
```
graph_path = os.path.join(experiment_dest_path, 'quantize_eval_model.pb')
evaluate_graph.main(graph_path=graph_path,
node_names_path=node_names_file,
dataset_path=data_path,
batch_size=1024)
tf.keras.backend.clear_session()
show_statistics(os.path.join(experiment_dest_path, "inference_graph_metrics.csv"))
```
|
github_jupyter
|
import os
import sys
sys.path.append(os.path.dirname(os.getcwd()))
import os
import subprocess
import tensorflow as tf
from scripts.quantization import evaluate_graph, freeze_model
from scripts import prepare_data, artifacts_reporter, train_model, evaluate_model
from ml_intuition.data.utils import plot_training_curve, show_statistics
DEST_PATH = 'xillinx_model_compilation_results'
DATA_FILE_PATH = os.path.join(os.path.dirname(os.getcwd()), 'datasets/pavia/pavia.npy')
GT_FILE_PAT = os.path.join(os.path.dirname(os.getcwd()), 'datasets/pavia/pavia_gt.npy')
experiment_dest_path = os.path.join(DEST_PATH, 'experiment_0')
data_path = os.path.join(experiment_dest_path, 'data.h5')
os.makedirs(experiment_dest_path, exist_ok=True)
prepare_data.main(data_file_path=DATA_FILE_PATH,
ground_truth_path=GT_FILE_PAT,
output_path=data_path,
train_size=250,
val_size=0.1,
stratified=True,
background_label=0,
channels_idx=2,
neighborhood_size=None,
save_data=True,
seed=0)
train_model.train(model_name='model_2d',
kernel_size=5,
n_kernels=200,
n_layers=1,
dest_path=experiment_dest_path,
data=data_path,
sample_size=103,
n_classes=9,
lr=0.001,
batch_size=128,
epochs=200,
verbose=0,
shuffle=True,
patience=15,
noise=[],
noise_sets=[])
plot_training_curve(os.path.join(experiment_dest_path, "training_metrics.csv"), ['val_loss', 'val_acc', 'loss', 'acc'])
evaluate_model.evaluate(
model_path=os.path.join(experiment_dest_path, 'model_2d'),
data=data_path,
dest_path=experiment_dest_path,
n_classes=9,
batch_size=1024,
noise=[],
noise_sets=[])
tf.keras.backend.clear_session()
show_statistics(os.path.join(experiment_dest_path, "inference_metrics.csv"))
freeze_model.main(model_path=os.path.join(experiment_dest_path, 'model_2d'),
output_dir=experiment_dest_path)
node_names_file = os.path.join(experiment_dest_path, 'freeze_input_output_node_name.json')
frozen_graph_path = os.path.join(experiment_dest_path, 'frozen_graph.pb')
cmd = '../scripts/quantize.sh ' + node_names_file + ' ' \
+ frozen_graph_path + ' ' + data_path + ' ' + \
'?,103,1,1' + ' ' + \
'ml_intuition.data.input_fn.calibrate_2d_input' + ' ' + \
'128' + ' ' + experiment_dest_path + \
' ' + str(0)
f = open(os.path.join(experiment_dest_path, 'call_output.txt'),'w')
env = os.environ.copy()
env['PYTHONPATH'] = os.path.dirname(os.getcwd())
subprocess.call(cmd, shell=True, env=env, stderr=f)
f.close()
graph_path = os.path.join(experiment_dest_path, 'quantize_eval_model.pb')
evaluate_graph.main(graph_path=graph_path,
node_names_path=node_names_file,
dataset_path=data_path,
batch_size=1024)
tf.keras.backend.clear_session()
show_statistics(os.path.join(experiment_dest_path, "inference_graph_metrics.csv"))
| 0.354545 | 0.799364 |
## Classify Radio Signals from Space with Keras
### Import Libraries
```
from livelossplot.tf_keras import PlotLossesCallback
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import numpy as np
np.random.seed(42)
import warnings;warnings.simplefilter('ignore')
%matplotlib inline
print('Tensorflow version:', tf.__version__)
```
### Load and Preprocess SETI Data
```
train_images = pd.read_csv('dataset/train/images.csv', header=None)
train_labels = pd.read_csv('dataset/train/labels.csv', header=None)
val_images = pd.read_csv('dataset/validation/images.csv', header=None)
val_labels = pd.read_csv('dataset/validation/labels.csv', header=None)
train_images.head()
train_labels.head()
print("Training set shape:", train_images.shape, train_labels.shape)
print("Validation set shape:", val_images.shape, val_labels.shape)
x_train = train_images.values.reshape(3200, 64, 128, 1)
x_val = val_images.values.reshape(800, 64, 128, 1)
y_train = train_labels.values
y_val = val_labels.values
```
### Plot 2D Spectrograms
```
plt.figure(0, figsize=(12,12))
for i in range(1,4):
plt.subplot(1,3,i)
img = np.squeeze(x_train[np.random.randint(0, x_train.shape[0])])
plt.xticks([])
plt.yticks([])
plt.imshow(img)
plt.imshow(np.squeeze(x_train[3]), cmap="gray");
```
### Create Training and Validation Data Generators
```
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)
```
### Creating the CNN Model
```
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
# Initialising the CNN
model = Sequential()
# 1st Convolution
model.add(Conv2D(32,(5,5), padding='same', input_shape=(64, 128,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(64,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(4, activation='softmax'))
initial_learning_rate = 0.005
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=5,
decay_rate=0.96,
staircase=True)
optimizer = Adam(learning_rate=lr_schedule)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
```
### Training the Model
```
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_loss',
save_weights_only=True, mode='min', verbose=0)
callbacks = [PlotLossesCallback(), checkpoint] #reduce_lr]
batch_size = 32
history = model.fit(
datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=len(x_train)//batch_size,
validation_data = datagen_val.flow(x_val, y_val, batch_size=batch_size, shuffle=True),
validation_steps = len(x_val)//batch_size,
epochs=12,
callbacks=callbacks
)
```
### Model Evaluation
```
model.evaluate(x_val, y_val)
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns
y_true = np.argmax(y_val, 1)
y_pred = np.argmax(model.predict(x_val), 1)
print(metrics.classification_report(y_true, y_pred))
print("Classification accuracy: %0.6f" % metrics.accuracy_score(y_true, y_pred))
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]
ax= plt.subplot()
sns.heatmap(metrics.confusion_matrix(y_true, y_pred, normalize='true'), annot=True, ax = ax, cmap=plt.cm.Blues); #annot=True to annotate cells
# labels, title and ticks
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(labels); ax.yaxis.set_ticklabels(labels);
```
|
github_jupyter
|
from livelossplot.tf_keras import PlotLossesCallback
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import numpy as np
np.random.seed(42)
import warnings;warnings.simplefilter('ignore')
%matplotlib inline
print('Tensorflow version:', tf.__version__)
train_images = pd.read_csv('dataset/train/images.csv', header=None)
train_labels = pd.read_csv('dataset/train/labels.csv', header=None)
val_images = pd.read_csv('dataset/validation/images.csv', header=None)
val_labels = pd.read_csv('dataset/validation/labels.csv', header=None)
train_images.head()
train_labels.head()
print("Training set shape:", train_images.shape, train_labels.shape)
print("Validation set shape:", val_images.shape, val_labels.shape)
x_train = train_images.values.reshape(3200, 64, 128, 1)
x_val = val_images.values.reshape(800, 64, 128, 1)
y_train = train_labels.values
y_val = val_labels.values
plt.figure(0, figsize=(12,12))
for i in range(1,4):
plt.subplot(1,3,i)
img = np.squeeze(x_train[np.random.randint(0, x_train.shape[0])])
plt.xticks([])
plt.yticks([])
plt.imshow(img)
plt.imshow(np.squeeze(x_train[3]), cmap="gray");
from tensorflow.keras.preprocessing.image import ImageDataGenerator
datagen_train = ImageDataGenerator(horizontal_flip=True)
datagen_train.fit(x_train)
datagen_val = ImageDataGenerator(horizontal_flip=True)
datagen_val.fit(x_val)
from tensorflow.keras.layers import Dense, Input, Dropout,Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
# Initialising the CNN
model = Sequential()
# 1st Convolution
model.add(Conv2D(32,(5,5), padding='same', input_shape=(64, 128,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# 2nd Convolution layer
model.add(Conv2D(64,(5,5), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Flattening
model.add(Flatten())
# Fully connected layer
model.add(Dense(1024))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Dense(4, activation='softmax'))
initial_learning_rate = 0.005
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=5,
decay_rate=0.96,
staircase=True)
optimizer = Adam(learning_rate=lr_schedule)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
checkpoint = ModelCheckpoint("model_weights.h5", monitor='val_loss',
save_weights_only=True, mode='min', verbose=0)
callbacks = [PlotLossesCallback(), checkpoint] #reduce_lr]
batch_size = 32
history = model.fit(
datagen_train.flow(x_train, y_train, batch_size=batch_size, shuffle=True),
steps_per_epoch=len(x_train)//batch_size,
validation_data = datagen_val.flow(x_val, y_val, batch_size=batch_size, shuffle=True),
validation_steps = len(x_val)//batch_size,
epochs=12,
callbacks=callbacks
)
model.evaluate(x_val, y_val)
from sklearn.metrics import confusion_matrix
from sklearn import metrics
import seaborn as sns
y_true = np.argmax(y_val, 1)
y_pred = np.argmax(model.predict(x_val), 1)
print(metrics.classification_report(y_true, y_pred))
print("Classification accuracy: %0.6f" % metrics.accuracy_score(y_true, y_pred))
labels = ["squiggle", "narrowband", "noise", "narrowbanddrd"]
ax= plt.subplot()
sns.heatmap(metrics.confusion_matrix(y_true, y_pred, normalize='true'), annot=True, ax = ax, cmap=plt.cm.Blues); #annot=True to annotate cells
# labels, title and ticks
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(labels); ax.yaxis.set_ticklabels(labels);
| 0.838316 | 0.852568 |
# Net Surgery
Caffe networks can be transformed to your particular needs by editing the model parameters. The data, diffs, and parameters of a net are all exposed in pycaffe.
Roll up your sleeves for net surgery with pycaffe!
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from PIL import Image  # Pillow; replaces the old standalone 'import Image' form
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# configure plotting
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
```
## Designer Filters
To show how to load, manipulate, and save parameters we'll design our own filters into a simple network that's only a single convolution layer. This net has two blobs, `data` for the input and `conv` for the convolution output and one parameter `conv` for the convolution filter weights and biases.
```
# Load the net, list its data and params, and filter an example image.
caffe.set_mode_cpu()
net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))
# load image and prepare as a single input batch for Caffe
im = np.array(Image.open('images/cat_gray.jpg'))
plt.title("original image")
plt.imshow(im)
plt.axis('off')
im_input = im[np.newaxis, np.newaxis, :, :]
net.blobs['data'].reshape(*im_input.shape)
net.blobs['data'].data[...] = im_input
```
The convolution weights are initialized from Gaussian noise while the biases are initialized to zero. These random filters give output somewhat like edge detections.
```
# helper show filter outputs
def show_filters(net):
net.forward()
plt.figure()
filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()
for i in range(3):
plt.subplot(1,4,i+2)
plt.title("filter #{} output".format(i))
plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)
plt.tight_layout()
plt.axis('off')
# filter the image with initial
show_filters(net)
```
Raising the bias of a filter will correspondingly raise its output:
```
# pick first filter output
conv0 = net.blobs['conv'].data[0, 0]
print("pre-surgery output mean {:.2f}".format(conv0.mean()))
# set first filter bias to 1
net.params['conv'][1].data[0] = 1.
net.forward()
print("post-surgery output mean {:.2f}".format(conv0.mean()))
```
Altering the filter weights is more exciting since we can assign any kernel like Gaussian blur, the Sobel operator for edges, and so on. The following surgery turns the 0th filter into a Gaussian blur and the 1st and 2nd filters into the horizontal and vertical gradient parts of the Sobel operator.
See how the 0th output is blurred, the 1st picks up horizontal edges, and the 2nd picks up vertical edges.
```
ksize = net.params['conv'][0].data.shape[2:]
# make Gaussian blur
sigma = 1.
y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]
g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))
gaussian = (g / g.sum()).astype(np.float32)
net.params['conv'][0].data[0] = gaussian
# make Sobel operator for edge detection
net.params['conv'][0].data[1:] = 0.
sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))
net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal
net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical
show_filters(net)
```
With net surgery, parameters can be transplanted across nets, regularized by custom per-parameter operations, and transformed according to your schemes.
## Casting a Classifier into a Fully Convolutional Network
Let's take the standard Caffe Reference ImageNet model "CaffeNet" and transform it into a fully convolutional net for efficient, dense inference on large inputs. This model generates a classification map that covers a given input size instead of a single classification. In particular a 8 $\times$ 8 classification map on a 451 $\times$ 451 input gives 64x the output in only 3x the time. The computation exploits a natural efficiency of convolutional network (convnet) structure by amortizing the computation of overlapping receptive fields.
To do so we translate the `InnerProduct` matrix multiplication layers of CaffeNet into `Convolutional` layers. This is the only change: the other layer types are agnostic to spatial size. Convolution is translation-invariant, activations are elementwise operations, and so on. The `fc6` inner product when carried out as convolution by `fc6-conv` turns into a 6 $\times$ 6 filter with stride 1 on `pool5`. Back in image space this gives a classification for each 227 $\times$ 227 box with stride 32 in pixels. Remember the equation for output map / receptive field size, output = (input - kernel_size) / stride + 1, and work out the indexing details for a clear understanding.
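As a quick sanity check of that equation (an illustrative calculation, not part of the original example):
```
def out_size(input_size, kernel_size, stride):
    # output = (input - kernel_size) / stride + 1
    return (input_size - kernel_size) // stride + 1

# fc6-conv applies a 6 x 6 filter with stride 1 on the 6 x 6 pool5 map: one output per 227 x 227 crop
print(out_size(6, 6, 1))       # 1
# at the pixel level: 227-pixel receptive field with an effective stride of 32 pixels
print(out_size(451, 227, 32))  # 8, i.e. the 8 x 8 classification map
```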
```
!diff net_surgery/bvlc_caffenet_full_conv.prototxt ../models/bvlc_reference_caffenet/deploy.prototxt
```
The only differences needed in the architecture are to change the fully connected classifier inner product layers into convolutional layers with the right filter size -- 6 x 6, since the reference model classifiers take the 36 elements of `pool5` as input -- and stride 1 for dense classification. Note that the layers are renamed so that Caffe does not try to blindly load the old parameters when it maps layer names to the pretrained model.
```
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# Load the original network and extract the fully connected layers' parameters.
net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt',
'../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
params = ['fc6', 'fc7', 'fc8']
# fc_params = {name: (weights, biases)}
fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}
for fc in params:
    print('{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape))
```
Consider the shapes of the inner product parameters. The weight dimensions are the output and input sizes while the bias dimension is the output size.
```
# Load the fully convolutional network to transplant the parameters.
net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt',
'../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']
# conv_params = {name: (weights, biases)}
conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}
for conv in params_full_conv:
    print('{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape))
```
The convolution weights are arranged in output $\times$ input $\times$ height $\times$ width dimensions. To map the inner product weights to convolution filters, we could roll the flat inner product vectors into channel $\times$ height $\times$ width filter matrices, but actually these are identical in memory (as row major arrays) so we can assign them directly.
The biases are identical to those of the inner product.
Let's transplant!
```
for pr, pr_conv in zip(params, params_full_conv):
conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays
conv_params[pr_conv][1][...] = fc_params[pr][1]
```
Next, save the new model weights.
```
net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')
```
To conclude, let's make a classification map from the example cat image and visualize the confidence of "tabby cat" (class 281, the class plotted below) as a probability heatmap. This gives an 8-by-8 prediction on overlapping regions of the 451 $\times$ 451 input.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# load input and configure preprocessing
im = caffe.io.load_image('images/cat.jpg')
transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape})
transformer.set_mean('data', np.load('../python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
# make classification map by forward and print prediction indices at each location
out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)]))
print(out['prob'][0].argmax(axis=0))
# show net input and confidence map (probability of the top prediction at each location)
plt.subplot(1, 2, 1)
plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0]))
plt.subplot(1, 2, 2)
plt.imshow(out['prob'][0,281])
```
The classifications include various cats -- 282 = tiger cat, 281 = tabby, 283 = persian -- and foxes and other mammals.
In this way the fully connected layers can be extracted as dense features across an image (see `net_full_conv.blobs['fc6'].data` for instance), which is perhaps more useful than the classification map itself.
Note that this model isn't totally appropriate for sliding-window detection since it was trained for whole-image classification. Nevertheless it can work just fine. Sliding-window training and finetuning can be done by defining a sliding-window ground truth and loss such that a loss map is made for every location and solving as usual. (This is an exercise for the reader.)
*A thank you to Rowland Depp for first suggesting this trick.*
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import Image
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# configure plotting
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Load the net, list its data and params, and filter an example image.
caffe.set_mode_cpu()
net = caffe.Net('net_surgery/conv.prototxt', caffe.TEST)
print("blobs {}\nparams {}".format(net.blobs.keys(), net.params.keys()))
# load image and prepare as a single input batch for Caffe
im = np.array(Image.open('images/cat_gray.jpg'))
plt.title("original image")
plt.imshow(im)
plt.axis('off')
im_input = im[np.newaxis, np.newaxis, :, :]
net.blobs['data'].reshape(*im_input.shape)
net.blobs['data'].data[...] = im_input
# helper show filter outputs
def show_filters(net):
net.forward()
plt.figure()
filt_min, filt_max = net.blobs['conv'].data.min(), net.blobs['conv'].data.max()
for i in range(3):
plt.subplot(1,4,i+2)
plt.title("filter #{} output".format(i))
plt.imshow(net.blobs['conv'].data[0, i], vmin=filt_min, vmax=filt_max)
plt.tight_layout()
plt.axis('off')
# filter the image with initial
show_filters(net)
# pick first filter output
conv0 = net.blobs['conv'].data[0, 0]
print("pre-surgery output mean {:.2f}".format(conv0.mean()))
# set first filter bias to 10
net.params['conv'][1].data[0] = 1.
net.forward()
print("post-surgery output mean {:.2f}".format(conv0.mean()))
ksize = net.params['conv'][0].data.shape[2:]
# make Gaussian blur
sigma = 1.
y, x = np.mgrid[-ksize[0]//2 + 1:ksize[0]//2 + 1, -ksize[1]//2 + 1:ksize[1]//2 + 1]
g = np.exp(-((x**2 + y**2)/(2.0*sigma**2)))
gaussian = (g / g.sum()).astype(np.float32)
net.params['conv'][0].data[0] = gaussian
# make Sobel operator for edge detection
net.params['conv'][0].data[1:] = 0.
sobel = np.array((-1, -2, -1, 0, 0, 0, 1, 2, 1), dtype=np.float32).reshape((3,3))
net.params['conv'][0].data[1, 0, 1:-1, 1:-1] = sobel # horizontal
net.params['conv'][0].data[2, 0, 1:-1, 1:-1] = sobel.T # vertical
show_filters(net)
!diff net_surgery/bvlc_caffenet_full_conv.prototxt ../models/bvlc_reference_caffenet/deploy.prototxt
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# Load the original network and extract the fully connected layers' parameters.
net = caffe.Net('../models/bvlc_reference_caffenet/deploy.prototxt',
'../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
params = ['fc6', 'fc7', 'fc8']
# fc_params = {name: (weights, biases)}
fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}
for fc in params:
print '{} weights are {} dimensional and biases are {} dimensional'.format(fc, fc_params[fc][0].shape, fc_params[fc][1].shape)
# Load the fully convolutional network to transplant the parameters.
net_full_conv = caffe.Net('net_surgery/bvlc_caffenet_full_conv.prototxt',
'../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel',
caffe.TEST)
params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']
# conv_params = {name: (weights, biases)}
conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}
for conv in params_full_conv:
print '{} weights are {} dimensional and biases are {} dimensional'.format(conv, conv_params[conv][0].shape, conv_params[conv][1].shape)
for pr, pr_conv in zip(params, params_full_conv):
conv_params[pr_conv][0].flat = fc_params[pr][0].flat # flat unrolls the arrays
conv_params[pr_conv][1][...] = fc_params[pr][1]
net_full_conv.save('net_surgery/bvlc_caffenet_full_conv.caffemodel')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# load input and configure preprocessing
im = caffe.io.load_image('images/cat.jpg')
transformer = caffe.io.Transformer({'data': net_full_conv.blobs['data'].data.shape})
transformer.set_mean('data', np.load('../python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1))
transformer.set_transpose('data', (2,0,1))
transformer.set_channel_swap('data', (2,1,0))
transformer.set_raw_scale('data', 255.0)
# make classification map by forward and print prediction indices at each location
out = net_full_conv.forward_all(data=np.asarray([transformer.preprocess('data', im)]))
print out['prob'][0].argmax(axis=0)
# show net input and confidence map (probability of the top prediction at each location)
plt.subplot(1, 2, 1)
plt.imshow(transformer.deprocess('data', net_full_conv.blobs['data'].data[0]))
plt.subplot(1, 2, 2)
plt.imshow(out['prob'][0,281])
| 0.564219 | 0.924891 |
# 📃 Solution for Exercise 04
In the previous notebook, we saw the effect of applying some regularization
on the coefficient of a linear model.
In this exercise, we will study the advantage of using some regularization
when dealing with correlated features.
We will first create a regression dataset. This dataset will contain 2,000
samples and 5 features from which only 2 features will be informative.
```
from sklearn.datasets import make_regression
data, target, coef = make_regression(
n_samples=2_000,
n_features=5,
n_informative=2,
shuffle=False,
coef=True,
random_state=0,
noise=30,
)
```
When creating the dataset, `make_regression` returns the true coefficient
used to generate the dataset. Let's plot this information.
```
import pandas as pd
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(coef, index=feature_names)
coef.plot.barh()
coef
```
Create a `LinearRegression` regressor and fit on the entire dataset and
check the value of the coefficients. Are the coefficients of the linear
regressor close to the coefficients used to generate the dataset?
```
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
linear_regression.fit(data, target)
linear_regression.coef_
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(linear_regression.coef_, index=feature_names)
_ = coef.plot.barh()
```
We see that the coefficients are close to the coefficients used to generate
the dataset. The dispersion is indeed caused by the noise injected during the
dataset generation.
Now, create a new dataset that will be the same as `data` with 4 additional
columns that will repeat twice features 0 and 1. This procedure will create
perfectly correlated features.
```
import numpy as np
data = np.concatenate([data, data[:, [0, 1]], data[:, [0, 1]]], axis=1)
```
Fit again the linear regressor on this new dataset and check the
coefficients. What do you observe?
```
linear_regression = LinearRegression()
linear_regression.fit(data, target)
linear_regression.coef_
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(linear_regression.coef_, index=feature_names)
_ = coef.plot.barh()
```
We see that the coefficient values are far from what one could expect.
By repeating the informative features, one would have expected these
coefficients to be similarly informative.
Instead, we see that some coefficients have a huge norm ~1e14. It means
that we are trying to solve a mathematically ill-posed problem. Indeed,
finding the coefficients of a linear regression involves inverting the matrix
`np.dot(data.T, data)`, which is not possible here (or leads to large
numerical errors).
Create a ridge regressor and fit on the same dataset. Check the coefficients.
What do you observe?
```
from sklearn.linear_model import Ridge
ridge = Ridge()
ridge.fit(data, target)
ridge.coef_
coef = pd.Series(ridge.coef_, index=feature_names)
_ = coef.plot.barh()
```
We see that the penalty applied to the weights gives better results: the
coefficient values no longer suffer from numerical issues. Indeed, the
matrix to be inverted internally is `np.dot(data.T, data) + alpha * I`.
Adding the penalty `alpha` allows the inversion without numerical issues.
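To make the numerical argument concrete, here is a small check (a sketch reusing the `data` array built above; `alpha = 1.0` mirrors the default `Ridge` penalty):
```
# The Gram matrix X^T X is numerically singular for perfectly correlated features,
# while adding alpha * I makes it well conditioned again.
gram = np.dot(data.T, data)
print(np.linalg.cond(gram))                                # astronomically large: ill-posed
print(np.linalg.cond(gram + 1.0 * np.eye(gram.shape[0])))  # moderate: invertible
```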
Can you find the relationship between the ridge coefficients and the original
coefficients?
```
ridge.coef_[:5] * 3
```
Repeating each informative feature three times caused the corresponding
ridge coefficients to be divided by three.
<div class="admonition tip alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Tip</p>
<p class="last">We always advise using an l2-penalized model instead of a non-penalized model
in practice. In scikit-learn, <tt class="docutils literal">LogisticRegression</tt> applies such a penalty
by default. However, one needs to use <tt class="docutils literal">Ridge</tt> (and even <tt class="docutils literal">RidgeCV</tt> to tune
the parameter <tt class="docutils literal">alpha</tt>) instead of <tt class="docutils literal">LinearRegression</tt>.</p>
</div>
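As an illustration of that advice (a sketch, not part of the original exercise), `RidgeCV` can select `alpha` by cross-validation:
```
import numpy as np
from sklearn.linear_model import RidgeCV

# Try a grid of penalty strengths and let cross-validation pick the best one.
ridge_cv = RidgeCV(alphas=np.logspace(-3, 3, num=50))
ridge_cv.fit(data, target)
print(ridge_cv.alpha_)   # selected regularization strength
print(ridge_cv.coef_)    # coefficients of the tuned model
```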
|
github_jupyter
|
from sklearn.datasets import make_regression
data, target, coef = make_regression(
n_samples=2_000,
n_features=5,
n_informative=2,
shuffle=False,
coef=True,
random_state=0,
noise=30,
)
import pandas as pd
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(coef, index=feature_names)
coef.plot.barh()
coef
from sklearn.linear_model import LinearRegression
linear_regression = LinearRegression()
linear_regression.fit(data, target)
linear_regression.coef_
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(linear_regression.coef_, index=feature_names)
_ = coef.plot.barh()
import numpy as np
data = np.concatenate([data, data[:, [0, 1]], data[:, [0, 1]]], axis=1)
linear_regression = LinearRegression()
linear_regression.fit(data, target)
linear_regression.coef_
feature_names = [f"Features {i}" for i in range(data.shape[1])]
coef = pd.Series(linear_regression.coef_, index=feature_names)
_ = coef.plot.barh()
from sklearn.linear_model import Ridge
ridge = Ridge()
ridge.fit(data, target)
ridge.coef_
coef = pd.Series(ridge.coef_, index=feature_names)
_ = coef.plot.barh()
ridge.coef_[:5] * 3
| 0.80871 | 0.993209 |
Road To AI
--------
*© MLSA : Achraf Jday & Abhishek Jaiswal.*
## Phase 1: data structures
The objective of this task is to become familiar with the data structures used for data manipulations and to implement the measures seen during the workshops: mean, median, quartiles, correlations.
<font size="+1" color="RED">**[Q]**</font> **Indicate your name in the box below:**
*Double-click here and insert your name. If you work in groups please insert all of your names below*
<font color="RED" size="+1">**[Q]**</font> **Rename this ipython file**
At the very top of this page, click on <tt>RoadToAI_1</tt> and add your name after <tt>RoadToAI_1</tt>.
For example, for the pair Luke Skywalker and Han Solo, the file name becomes: <pre>RoadToAI_1-Skywalker-Solo</pre>.
Remember to save the file frequently while working :
- either by clicking on the "floppy disk" icon
- or by the key combination [Ctrl]-S
<font color="RED" size="+1">IMPORTANT : submission of your final file</font>
**Name to be given to the file to be posted** : *Name1_Name2.ipynb*
- *Name1* and *Name2*: names of the members of the pair
- don't compress or make an archive: you have to submit the ipython file as it is.
The file is to be pushed to the repo as shown during the workshop.
## Presentation ##
### Objectives of this phase
The work to be done is as follows:
- learn how to use jupyter notebook (cf doc: <https://jupyter.org/>).
- to learn about the data structures that will be used to program throughout the sessions: numpy and pandas.
- implement first functions that will be useful.
## Learn how to use jupyter notebook
This document is dynamic: it is composed of "boxes" which can be "compiled" by using the "Run Cells" command in the "Cell" menu (either by clicking on the >| icon or by using the [SHIFT][ENTER] key combination).
There are **2 main types** of boxes :
- the "Markdown" boxes: everything typed in these boxes is text that will be rendered "nicely" after being compiled. It is possible to write Latex commands to define equations, and it recognizes some HTML tags. By double-clicking in a compiled Markdown box, one can access its contents and then modify it.
- the "Code" boxes: in these boxes, we write Python code. The compilation of the box produces the execution of this Python code and produces an "Out" box in which the result of the last command is displayed. Any valid Python code can be written in this box. This type of box can be recognized by the "In [n]" written next to its top left corner ("n" is an integer).
The type of a box can be changed using the menu at the top of the page (just above the text).
The "+" icon creates a new box just below the active box.
The icon with the scissors erases the active box (attention ! it is **irreversible !**).
To know more about it :
- http://ipython.org/
You can also refer to the Python documentation:
- https://www.python.org/
In what follows, the Markdown boxes beginning with <font color="RED" size="+1">**[Q]**</font> ask questions that must be answered in the box that follows directly (possibly by creating new "Code" or "Markdown" type boxes, at your convenience).
Some of the "Code" boxes to be filled in are followed by an output "Out[.]:" which shows an example of an expected result.
Feel free to create new "Code" or "Markdown" boxes in this document if you need them.
```
# sample code box :
# --> select this box (by clicking inside)
# --> Run Cells from the "Cell" Menu (or click on the icon >|)
#
print("A test :")
2+3
import sys
print("Version Python: ", sys.version)
```
<font color="RED" size="+1">**[Q]**</font> In the following "Code" box, give the Python instructions to perform the calculation :
$$-\frac{1}{3}\log_2(\frac{1}{3})-\frac{2}{3}\log_2(\frac{2}{3})$$
The value you should find is:
0.9182958340544896
**<font style="BACKGROUND-COLOR: lightgray" color='red'> Important</font>** :
when reading the text of a jupyter file on your computer, remember to execute the "Code" boxes in the order they appear. The whole page behaves as a single Python session and to execute some boxes it is necessary that the previous ones have been executed beforehand.
<font color="RED" size="+1">**[Q]**</font> In the next "Code" box, write the function <tt>calcul</tt> which, given a real $x$ in $[0,1]$, returns the value of $$-x\log_2(x)-(1-x)\log_2(1-x)$$ if $x\not= 0$ and $x \not= 1$, and the value $0.0$ otherwise.
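A minimal sketch of one possible implementation is shown below (try writing your own version first; only the standard `math` module is assumed):
```
import math

def calcul(x):
    # -x*log2(x) - (1-x)*log2(1-x), with the convention that the value is 0.0 at x = 0 or x = 1
    if x == 0 or x == 1:
        return 0.0
    return -x * math.log2(x) - (1 - x) * math.log2(1 - x)

print(calcul(1/3))  # approximately 0.9182958340544896
```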
```
calcul(0)
calcul(1/3)
calcul(0.5)
```
## Documentation
First, you need to get to grips with the **numpy**, **pandas** and **matplotlib** libraries:
- Numpy = <http://scipy.github.io/old-wiki/pages/Tentative_NumPy_Tutorial>
or <https://realpython.com/numpy-tutorial/>
- Pandas = http://pandas.pydata.org/pandas-docs/stable/10min.html
- Matplotlib = https://matplotlib.org/1.3.1/users/pyplot_tutorial.html
Read these pages and practice these tutorials to become familiar with these tools.
<font color="RED" size="+1">**[Q]**</font> Give in the following box the main characteristics of each of these 3 libraries: What are they used for? What do they allow to represent ? What is their usefulness and their specificities? etc.
Give examples of their specific uses.
To use the 3 previous libraries, you must first import them into Jupyter using the commands given in the following box.
If a library is not installed, an error may occur when importing it. It is then necessary to install it. For example, for the pandas library :
- in a terminal, execute the command:
pip install --user -U pandas
- once the library is installed, it is necessary to restart the Jupyter Python kernel:
in the Jupyter menu at the top of the page, choose "<tt>Kernel -> restart</tt>".
```
import numpy as np
import pandas as pd
from datetime import datetime as dt
import matplotlib.pyplot as plt
%matplotlib inline
```
## Programming and experimentation ##
The dataset that will be used in this session to validate your functions corresponds to data concerning prices in different states of North America.
The reference of this dataset is available here : <https://github.com/amitkaps/weed/blob/master/1-Acquire.ipynb>
This data is also provided in the file <tt>data-01.zip</tt> to be downloaded from the Moodle site.
It consists of three files:
* <tt>"data-01/Weed_Price.csv"</tt>: price per date and per condition (for three different qualities)
* <tt>"data-01/Demographics_State.csv"</tt>: demographic information on each state
* <tt>"data-01/Population_State.csv"</tt>: population of each state
The first step is to download these files into pandas dataframes.
As they are <a href="https://fr.wikipedia.org/wiki/Comma-separated_values"><tt>CSV</tt></a> files, we use the Pandas library which contains a function to read such file types.
```
# Loading data files :
prices_pd = pd.read_csv("data-01/Weed_Price.csv", parse_dates=[-1])
demography_pd = pd.read_csv("data-01/Demographics_State.csv")
population_pd = pd.read_csv("data-01/Population_State.csv")
```
<font color="RED" size="+1">**[Q]**</font> **Dataframes**
Start by looking at the documentation of the <tt>read_csv</tt> function of the Pandas library. What does this function return (what is the type of the returned object)?
<font color="RED" size="+1">**[Q]**</font> **Dataframes**
Pandas dataframes allow storing together data whose values can be of different kinds. This is similar to an Excel sheet: each row corresponds to one piece of data (an "example") and contains in each column values that can be of different types.
Examine the type of the three variables that have just been defined. To do this, use the Python function <tt>type</tt>: for example <tt>type(prices_pd)</tt>.
```
# type of prices_pd:
# type of demography_pd
# type of population_pd
```
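One possible answer is sketched below; since `read_csv` builds pandas DataFrames, all three should print the same type:
```
print(type(prices_pd))      # <class 'pandas.core.frame.DataFrame'>
print(type(demography_pd))  # <class 'pandas.core.frame.DataFrame'>
print(type(population_pd))  # <class 'pandas.core.frame.DataFrame'>
```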
**Important**: each time you use a command, look at the type of the result obtained (list, DataFrame, Series, array, ...): it will tell you what you can apply to this result.
<font color="RED" size="+1">**[Q]**</font> **Learn more about the data...**
* Begin by getting familiar with the data by viewing it and displaying examples of the rows or columns that these DataFrames contain. To do this, manipulate the functions of the libraries you have just discovered (for example, <tt>head()</tt>, <tt>tail()</tt>, ...).
```
# 15 first lines of prices_pd
# 7 last lines of prices_pd
```
Data types can be retrieved through the <tt>dtypes</tt> attribute:
```
prices_pd.dtypes
```
There are a lot of features to discover to get useful information about DataFrames. For example, the list of states can be obtained this way:
```
les_etats = np.unique(prices_pd["State"].values)
# Show the list of states :
```
Compare the number of values of:
prices_pd["MedQ"].values
and
np.unique(prices_pd["MedQ"].values)
Explain what's going on.
## Implementation of classical measurements ##
We will now write the functions allowing us to calculate "basic" measurements on one-dimensional data. For this, we will work with the <tt>array</tt> structure of numpy. To convert a DataFrame column to an <tt>array</tt>, here is how to do it:
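For example (a minimal sketch using the <tt>prices_pd</tt> DataFrame loaded above; the chosen column is only an illustration):
```
# Convert the "MedQ" column of the DataFrame into a numpy array:
medq_values = prices_pd["MedQ"].values
print(type(medq_values))  # <class 'numpy.ndarray'>
```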
### Average
<font color="RED" size="+1">**[Q]**</font> **The average**
Write the function average(values) that calculates the average of the elements of an array (using a <tt>for</tt> loop, or <tt>sum</tt> and <tt>len</tt>, without using the functions already implemented in numpy).
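One possible loop-based implementation, given as a sketch only (you are expected to write your own version):
```
def average(values):
    # Average of the elements of a 1-D array, using a plain loop (no numpy helpers).
    total = 0.0
    count = 0
    for v in values:
        total += v
        count += 1
    return total / count
```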
Test the average function and compare your results with the built-in implementation used below:
```
print("The average (MedQ) with my function is : %f dollars" % average(prices_pd["MedQ"]))
print("The average (MedQ) with numpy's mean is : %f dollars" % prices_pd["MedQ"].mean())
```
<font color="RED" size="+1">**[Q]**</font> **Averages on qualities**
Calculate:
* The average price for medium and high qualities
* The average price for medium and high qualities in the state of New York.
Calculations will be done on the one hand using your function, and on the other hand using pandas/numpy functions.
<font color="RED" size="+1">**[Q]**</font> **Average over states**
Calculate the average price of medium and high qualities in all states -- the list of states is obtained as follows: <tt>states = np.unique(prices_pd["State"].values)</tt>
You can (must) do this in two ways:
* Make a loop on each of the states
* Use the <tt>groupby</tt> function (a minimal sketch is given just after this list), as explained here: http://pandas.pydata.org/pandas-docs/stable/groupby.html and here: https://www.kaggle.com/crawford/python-groupby-tutorial
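For reference, a minimal sketch of the <tt>groupby</tt> approach (using the column names seen above; the loop-based version is left to you):
```
# Average price of the medium and high qualities, per state:
avg_by_state = prices_pd.groupby("State")[["MedQ", "HighQ"]].mean()
print(avg_by_state.head())
```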
<font color="RED" size="+1">**[Q]**</font> **Other averages**
Calculate the average (with both functions) of the price of the low quality. What do you find? Explain...
<font color="RED" size="+1">**[Q]**</font> **Data modification**
Replace the <tt>NaN</tt> values in the <tt>LowQ</tt> column using the function described here: http://pandas.pydata.org/pandas-docs/version/0.17.1/generated/pandas.DataFrame.fillna.html.
In particular, we want to use the <tt>ffill</tt> (forward fill) method after sorting by state and date with <tt>sort_values</tt>:
prices_sorted = prices_pd.sort_values(by=['State', 'date'], inplace=False)
Explain the result of this command.
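A minimal sketch of the replacement step (one possible approach, assuming a forward fill within the sorted data; pandas aligns the assignment on the index):
```
# Sort by state and date, then forward-fill the missing LowQ prices:
prices_sorted = prices_pd.sort_values(by=['State', 'date'], inplace=False)
prices_pd['LowQ'] = prices_sorted['LowQ'].fillna(method='ffill')
```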
<font color="RED" size="+1">**[Q]**</font> **Changes in results**
Recalculate the average price for quality <tt>Low</tt>. What happens now?
<font color="RED" size="+1">**[Q]**</font> **Histogram plotting**
Give Python instructions to plot the histogram of price averages (<tt>LowQ</tt>) by state.
To help you build a histogram, you can study the next page:
<http://matplotlib.org/examples/lines_bars_and_markers/barh_demo.html>
```
```
<font color="RED" size="+1">**[Q]**</font> **Density estimation**
We will now look at counting the number of states that fall within each price range. To do this:
* Calculate the table of average prices as follows
```
average_price=prices_pd[["State","LowQ"]].groupby(["State"]).mean()
print(average_price)
print("===========")
average_price=average_price.values.ravel()
print(average_price)
```
<font color="RED" size="+1">**[Q]**</font> **Variation bounds**
* Calculate the min and max values of the average prices
<font color="RED" size="+1">**[Q]**</font> **Calculation of a workforce**
Take a discretization interval of size 20, and calculate the number of states per bins (as a vector). Draw the corresponding histogram
<font color="RED" size="+1">**[Q]**</font> **Change of scale**
Now take an interval of size 40.
This can be done in the following way with Pandas:
```
effectif=pd.cut(average_price,20)                     # bin the average prices into 20 intervals
effectif2=pd.value_counts(effectif)                   # number of states in each interval
effectif3=effectif2.reindex(effectif.categories)      # put the intervals back in their natural order
effectif3.plot(kind='bar')                            # bar chart of the counts
```
And like that with Numpy:
```
plt.hist(average_price,bins=20)
```
The estimation of density in pandas can be done as follows
```
effectif=pd.DataFrame(average_price)
effectif.plot(kind='kde')
```
### Cumulative density
<font color="RED" size="+1">**[Q]**</font> **Calculation of the cumulative density**
Calculate the cumulative density from the average prices, with a given discretization interval and represent it graphically.
The method must return two arrays: the abscissa $X$ (the possible average prices between the min and max prices), and the associated density.
```
```
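One possible sketch using numpy (assuming the <tt>average_price</tt> array computed above, with 20 bins as an example):
```
# Cumulative proportion of states whose average LowQ price is below each bin edge:
counts, bin_edges = np.histogram(average_price, bins=20)
cumulative = np.cumsum(counts) / float(counts.sum())
X = bin_edges[1:]  # upper edge of each bin
plt.step(X, cumulative, where='post')
plt.xlabel('Average LowQ price')
plt.ylabel('Cumulative proportion of states')
plt.show()
```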
<font color="RED" size="+1">**[Q]**</font> **Quartiles**
Write the function quartile(alpha, x, y) which finds the quantile of order alpha.
- quartile(0.5, x, density) will correspond to the median.
Compute and graphically represent the boxplot.
Remark: a boxplot in pandas is made like this:
a=pd.DataFrame(average_price)
a.boxplot()
### Variance
<font color="RED" size="+1">**[Q]**</font> **Calculation of variance**
We now want to add a column <tt>HighQ_var</tt> to the original data containing the price variance by state. Give the corresponding Python instructions.
**WARNING**: this requires processing the states one after the other.
# Synthesis work: California
Pandas allows you to summarize data in the following way (for the DataFrame with the name <tt>df</tt>):
df.describe()
<font color="RED" size="+1">**[Q]**</font> Check that the values found on the state of California match the values found through your various functions.
To do this, give in the following the code that uses your functions (means, variance, and quartiles) as well as the result of the function <tt>describe</tt>.
<font color="RED" size="+1">**[Q]**</font> **Correlation Matrix**
We will now focus on calculating the correlation between prices in New York and prices in California.
Start by plotting the point cloud of prices (by date) in California ($X$ axis) versus New York ($Y$ axis) for the high quality (<tt>HighQ</tt>).
To do this, start by creating a DataFrame with this information:
```
price_ny=prices_pd[prices_pd['State']=='New York']
price_ca=prices_pd[prices_pd['State']=='California']
price_ca_ny=price_ca.merge(price_ny,on='date')
price_ca_ny.head()
# Run this box and comment on the result obtained
```
<font color="RED" size="+1">**[Q]**</font> **Spot clouds*
Graphically represent the cloud of points: see <http://matplotlib.org/examples/shapes_and_collections/scatter_demo.html>
<font color="RED" size="+1">**[Q]**</font> **Correlations**
Using the previously written mean function, write a function <tt>correlation(x,y)</tt> which calculates the linear correlation between two Numpy arrays.
```
# Apply your function with the following instruction:
# print("The correlation is :%f"%correlation(price_ca_ny["HighQ_x"].values,price_ca_ny["HighQ_y"].values))
```
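One possible sketch of the linear (Pearson) correlation, built on the <tt>average</tt> function written earlier (an illustration, not the only valid approach):
```
def correlation(x, y):
    # Pearson correlation between two numpy arrays of equal length.
    mx, my = average(x), average(y)
    cov = average((x - mx) * (y - my))
    return cov / np.sqrt(average((x - mx) ** 2) * average((y - my) ** 2))
```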
<font color="RED" size="+1">**[Q]**</font> **Correlation matrix**
Calculate the correlation matrix for all combinations of states.
<font color="RED" size="+1">**[Q]**</font> **Other correlations...**
Calculate the correlations between prices (<tt>low</tt> and <tt>high</tt> quality) and the average income per state, available in the table <tt>demography_pd</tt> loaded at the beginning of this notebook.
What can we conclude from this?
# Serving a Keras Resnet Model
Keras is a high level API that can be used to build deep neural nets with only a few lines of code, and supports a number of [backends](https://keras.io/backend/) for computation (TensorFlow, Theano, and CNTK). Keras also contains a library of pre-trained models, including a Resnet model with 50-layers, trained on the ImageNet dataset, which we will use for this exercise.
This notebook teaches how to create a servable version of the Resnet50 model in Keras using the TensorFlow backend. The servable model can be served using [TensorFlow Serving](https://www.tensorflow.org/serving/), which runs very efficiently in C++ and supports multiple platforms (different OSes, as well as hardware with different types of accelerators such as GPUs). The model will need to handle RPC prediction calls coming from a client that sends requests containing a batch of jpeg images.
See https://github.com/keras-team/keras/blob/master/keras/applications/resnet50.py for the implementation of ResNet50.
# Preamble
Import the required libraries.
```
# Import Keras libraries
from keras.applications import resnet50  # module form, referenced later as resnet50.ResNet50 / resnet50.preprocess_input
from keras.applications.resnet50 import preprocess_input
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras import backend as K
import numpy as np
# Import TensorFlow saved model libraries
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import utils
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
from tensorflow.contrib.session_bundle import exporter
```
# Constants
```
_DEFAULT_IMAGE_SIZE = 224
```
# Setting the Output Directory
Set a version number and directory for the output of the servable model. Note that with TensorFlow, if you've already saved a servable in a directory, trying to save another servable to the same directory will fail. Always increment your version number, or else delete the output directory before re-running the servable creation code.
```
VERSION_NUMBER = 1 #Increment this if you want to generate more than one servable model
SERVING_DIR = "keras_servable/" + str(VERSION_NUMBER)
```
# Build the Servable Model from Keras
Keras has a prepackaged ImageNet-trained ResNet50 model which takes in a 4d input tensor and outputs a list of class probabilities for all of the classes.
We will create a servable model whose input takes in a batch of jpeg-encoded images, and outputs a dictionary containing the top k classes and probabilities for each image in the batch. We've refactored the input preprocessing and output postprocessing into helper functions.
# Helper Functions for Building a TensorFlow Graph
TensorFlow is essentially a computation graph with variables and states. The graph must be built before it can ingest and process data. Typically, a TensorFlow graph will contain a set of input nodes (called placeholders) from which data can be ingested, and a set of TensorFlow functions that take existing nodes as inputs and produces a dependent node that performs a computation on the input nodes.
It is often useful to create helper functions for building TensorFlow graphs for two reasons:
1. Modularity: you can reuse functions in different places; for instance, a different image model or ResNet architecture can reuse functions.
2. Testability: you can unit test different parts of your code easily!
## Helper function: convert JPEG strings to Normalized 3D Tensors
The client (resnet_client.py) packs jpeg-encoded images into an array of jpegs (each entry a string) and sends it to the server. These jpegs are already resized to 224x224x3, so no resizing is needed on the server side before entering the ResNet model. However, the Keras ResNet50 model was trained on preprocessed pixel values (RGB channels flipped and centered on the ImageNet channel means, as described in the docstring below). We will need to extract the raw 3D tensor from each jpeg string and apply the same preprocessing.
**Exercise:** Add a command in the helper function to build a node that decodes a jpeg string into a 3D RGB image tensor.
**Useful References:**
* [tf.image module](https://www.tensorflow.org/api_guides/python/image)
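Purely as a generic illustration of the referenced API (not a drop-in answer for the blank below; the sample image path is the one used elsewhere in this notebook):
```
# Decode a jpeg byte string into a uint8 tensor of shape [height, width, 3].
with open("client/cat_sample.jpg", "rb") as f:
    demo_jpeg = f.read()

demo_ph = tf.placeholder(dtype=tf.string, shape=[], name='jpeg_demo')
demo_decoded = tf.image.decode_jpeg(demo_ph, channels=3)

with tf.Session() as sess:
    img = sess.run(demo_decoded, feed_dict={demo_ph: demo_jpeg})
    print(img.shape, img.dtype)  # (224, 224, 3) uint8 for the sample image
```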
```
# Preprocessing helper function similar to `resnet_training_to_serving_solution.ipynb`.
def build_jpeg_to_image_graph(jpeg_image):
"""Build graph elements to preprocess an image by subtracting out the mean from all channels.
Args:
image: A jpeg-formatted byte stream represented as a string.
Returns:
A 3d tensor of image pixels normalized for the Keras ResNet50 model.
The canned ResNet50 pretrained model was trained after running
keras.applications.resnet50.preprocess_input in 'caffe' mode, which
flips the RGB channels and centers the pixel around the mean [103.939, 116.779, 123.68].
There is no normalizing on the range.
"""
image = ???
image = tf.to_float(image)
image = resnet50.preprocess_input(image)
return image
```
### Unit test the helper function
**Exercise:** We are going to construct an input node in a graph, and use the helper function to add computational components on the input node. Afterwards, we will run the graph by providing sample input into the graph using a TensorFlow session.
Input nodes for receiving data are called [placeholders](https://www.tensorflow.org/api_docs/python/tf/placeholder). A placeholder can store a Tensor of arbitrary dimension, and arbitrary length in any dimension. In the second step where the graph is run, the placeholder is populated with input data, and dependent nodes in your graph can then operate on the data and ultimately return an output. An example of a placeholder that holds a 1d tensor of floating values is:
```
x = tf.placeholder(dtype=tf.float32, shape=[10], name='my_input_node')
```
Note that we assigned the Python variable x as a handle to the placeholder; calling tf.placeholder() by itself would still create an element in the TensorFlow graph that can be referenced by its name 'my_input_node'. However, it helps to keep a Python pointer to this graph element since we can more easily pass it into helper functions.
Any dependent node in the graph can serve as an output node. For instance, passing an input node x through `y = build_jpeg_to_image_graph(x)` would return a node referenced by python variable y which is the result of processing the input through the operations in the helper function. When we run the test graph with real data below, you will see how to return the output of y.
**Remember:** TensorFlow helper functions are used to help construct a computational graph! build_jpeg_to_image_graph() does not return a 3D array. It returns a graph node that returns a 3D array after processing a jpeg-encoded string!
```
# Defining input test graph nodes: only needs to be run once!
test_jpeg = ??? # Input node, a placeholder for a jpeg string
test_decoded_tensor = ??? # Output node, which returns a 3D tensor after processing.
# Print the graph elements to check shapes. ? indicates that TensorFlow does not know the length of those dimensions.
print(test_jpeg)
print(test_decoded_tensor)
```
### Run the Test Graph
Now we come to the data processing portion. To run data through a constructed TensorFlow graph, a session must be created to read input data into the graph and return output data. TensorFlow will only run a portion of the graph that is required to map a set of inputs (a dictionary of graph nodes, usually placeholders, as keys, and the input data as values) to an output graph node. This is invoked by the command:
```
tf.Session().run(output_node,
{input_node_1: input_data_1, input_node_2: input_data_2, ...})
```
To test the helper function, we assign a jpeg string to the input placeholder, and return a 3D tensor result which is the normalized image.
**Exercise:** Add more potentially useful assert statements to test the output.
```
# Run the graph! Validate the result of the function using a sample image client/cat_sample.jpg
ERROR_TOLERANCE = 1e-4
with open("client/cat_sample.jpg", "rb") as imageFile:
jpeg_str = imageFile.read()
with tf.Session() as sess:
result = sess.run(test_decoded_tensor, feed_dict={test_jpeg: jpeg_str})
assert result.shape == (224, 224, 3)
# TODO: Replace with assert statements to check max and min normalized pixel values
assert result.max() <= ??? + ERROR_TOLERANCE # Max pixel value after subtracting mean
assert result.min() >= ??? - ERROR_TOLERANCE # Min pixel value after subtracting mean
print('Hooray! JPEG decoding test passed!')
```
### Remarks
The approach above uses vanilla TensorFlow to perform unit testing. You may notice that the code is more verbose than ideal, since you have to create a session, feed input through a dictionary, etc. We encourage the student to investigate some options below:
[TensorFlow Eager](https://research.googleblog.com/2017/10/eager-execution-imperative-define-by.html) was introduced in TensorFlow 1.5 as a way to execute TensorFlow graphs in a way similar to numpy operations. After testing individual parts of the graph using Eager, you will need to rebuild a graph with the Eager option turned off in order to build a performance optimized TensorFlow graph. Also, keep in mind that you will need another virtual environment with TensorFlow 1.5 in order to run eager execution, which may not be compatible with TensorFlow Serving 1.4 used in this tutorial.
[TensorFlow unit testing](https://www.tensorflow.org/api_guides/python/test) is a more software engineer oriented approach to run tests. By writing test classes that can be invoked individually when building the project, calling tf.test.main() will run all tests and return a list of ones that succeeded and failed, allowing you to inspect errors. Because we are in a notebook environment, such a test would not succeed due to an already running kernel that tf.test cannot access. The tests must be run from the command line, e.g. `python test_my_graph.py`.
We've provided both eager execution and unit test examples in the [testing](./testing) directory showing how to unit test various components in this notebook. Note that because these examples contain the solution to exercises below, please complete all notebook exercises prior to reading through these examples.
Now that we know how to run TensorFlow tests, let's create and test more helper functions!
## Helper Function: Preprocessing Server Input
The server receives a client request in the form of a dictionary {'images': tensor_of_jpeg_encoded_strings}, which must be preprocessed into a 4D tensor before feeding into the Keras ResNet50 model.
**Exercise**: You will need to modify the input to the Keras Model to be compliant with [the ResNet client](./client/resnet_client.py). Using tf.map_fn and build_jpeg_to_image_graph, fill in the missing line (marked ???) to convert the client request into an array of 3D floating-point, preprocessed tensors. The following lines stack and reshape this array into a 4D tensor.
**Useful References:**
* [tf.map_fn](https://www.tensorflow.org/api_docs/python/tf/map_fn)
* [tf.DType](https://www.tensorflow.org/api_docs/python/tf/DType)
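A generic illustration of tf.map_fn, unrelated to the exercise data. Note that when the mapped function returns a different dtype than its input (e.g. string in, float32 out), the dtype argument must be passed explicitly.
```
# Apply a per-element function over the first dimension of a tensor.
xs = tf.placeholder(dtype=tf.float32, shape=[None])
squares = tf.map_fn(lambda x: x * x, xs)  # same dtype in and out, so no dtype= needed

with tf.Session() as sess:
    print(sess.run(squares, feed_dict={xs: [1.0, 2.0, 3.0]}))  # [1. 4. 9.]
```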
```
def preprocess_input(jpeg_tensor):
processed_images = ??? # Convert list of JPEGs to a list of 3D tensors
processed_images = tf.stack(processed_images) # Convert list of tensors to tensor of tensors
processed_images = tf.reshape(tensor=processed_images, # Reshape to ensure TF graph knows the final dimensions
shape=[-1, _DEFAULT_IMAGE_SIZE, _DEFAULT_IMAGE_SIZE, 3])
return processed_images
```
### Unit Test the Input Preprocessing Helper Function
**Exercise**: Construct a TensorFlow unit test graph for the input function.
**Hint:** the input node test_jpeg_tensor should be a [tf.placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder). You need to define the `shape` parameter in tf.placeholder. `None` inside an array indicates that the length can vary along that dimension.
```
# Build a Test Input Preprocessing Network: only needs to be run once!
test_jpeg_tensor = ??? # Input node: a placeholder for a 1-D tensor of jpeg strings (variable length).
test_processed_images = ??? # Output node, which returns a 4D tensor (a batch of images) after processing.
# Print the graph elements to check shapes. ? indicates that TensorFlow does not know the length of those dimensions.
print(test_jpeg_tensor)
print(test_processed_images)
# Run test network using a sample image client/cat_sample.jpg
with open("client/cat_sample.jpg", "rb") as imageFile:
jpeg_str = imageFile.read()
with tf.Session() as sess:
result = sess.run(test_processed_images, feed_dict={test_jpeg_tensor: np.array([jpeg_str, jpeg_str])}) # Duplicate for length 2 array
assert result.shape == (2, 224, 224, 3) # 4D tensor with first dimension length 2, since we have 2 images
# TODO: add a test for min and max normalized pixel values
assert result.max() <= 255.0 - 103.939 + ERROR_TOLERANCE # Normalized
assert result.min() >= -123.68 - ERROR_TOLERANCE # Normalized
# TODO: add a test to verify that the resulting tensor for image 0 and image 1 are identical.
assert result[0].all() == result[1].all()
print('Hooray! Input unit test succeeded!')
```
## Helper Function: Postprocess Server Output
**Exercise:** The Keras model returns a 1D tensor of probabilities for each class. We want to write a postprocess_output() that returns only the top k classes and probabilities.
**Useful References:**
* [tf.nn.top_k](https://www.tensorflow.org/api_docs/python/tf/nn/top_k)
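A generic illustration of tf.nn.top_k on a small constant tensor (not tied to the model output):
```
# top_k returns a (values, indices) pair, sorted by decreasing value.
demo_probs = tf.constant([0.10, 0.50, 0.20, 0.15, 0.05])
demo_values, demo_indices = tf.nn.top_k(demo_probs, k=3)

with tf.Session() as sess:
    v, i = sess.run([demo_values, demo_indices])
    print(v)  # [0.5  0.2  0.15]
    print(i)  # [1 2 3]
```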
```
TOP_K = 5
def postprocess_output(model_output):
'''Return top k classes and probabilities.'''
top_k_probs, top_k_classes = ???
return {'classes': top_k_classes, 'probabilities': top_k_probs}
```
### Unit Test the Output Postprocessing Helper Function
**Exercise:** Fill in the shape field for the model output, which should be a tensor of probabilities. Try to use the number of classes that is returned by the ImageNet-trained ResNet50 model.
```
# Build Test Output Postprocessing Network: only needs to be run once!
test_model_output = tf.placeholder(dtype=tf.float32, shape=???, name='test_logits_tensor')
test_prediction_output = postprocess_output(test_model_output)
# Print the graph elements to check shapes.
print(test_model_output)
print(test_prediction_output)
# Import numpy testing framework for float comparisons
import numpy.testing as npt
# Run test network
# Input a tensor with clear winners, and perform checks
# Be very specific about what is expected from your mock model.
model_probs = np.ones(???) # TODO: use the same dimensions as your test_model_output placeholder.
model_probs[2] = 2.5 # TODO: you can create your own tests as well
model_probs[5] = 3.5
model_probs[10] = 4
model_probs[49] = 3
model_probs[998] = 2
TOTAL_WEIGHT = np.sum(model_probs)
model_probs = model_probs / TOTAL_WEIGHT
with tf.Session() as sess:
result = sess.run(test_prediction_output, {test_model_output: model_probs})
classes = result['classes']
probs = result['probabilities']
# Check values
assert len(probs) == 5
npt.assert_almost_equal(probs[0], model_probs[10])
npt.assert_almost_equal(probs[1], model_probs[5])
npt.assert_almost_equal(probs[2], model_probs[49])
npt.assert_almost_equal(probs[3], model_probs[2])
npt.assert_almost_equal(probs[4], model_probs[998])
assert len(classes) == 5
assert classes[0] == 10
assert classes[1] == 5
assert classes[2] == 49
assert classes[3] == 2
assert classes[4] == 998
print('Hooray! Output unit test succeeded!')
```
# Load the Keras Model and Build the Graph
The Keras Model uses TensorFlow as its backend, and therefore its inputs and outputs can be treated as elements of a TensorFlow graph. In other words, you can provide an input that is a TensorFlow tensor, and read the model output like a TensorFlow tensor!
**Exercise**: Build the end to end network by filling in the TODOs below.
**Useful References**:
* [Keras ResNet50 API](https://www.tensorflow.org/api_docs/python/tf/keras/applications/ResNet50)
* [Keras Model class API](https://faroit.github.io/keras-docs/1.2.2/models/model/): ResNet50 model inherits this class.
```
# TODO: Create a placeholder for your arbitrary-length 1D Tensor of JPEG strings
images = tf.placeholder(???)
# TODO: Call preprocess_input to return processed_images
processed_images = ???
# Load (and download if missing) the ResNet50 Keras Model (may take a while to run)
# TODO: Use processed_images as input
model = resnet50.ResNet50(???)
# Rename the model to 'resnet' for serving
model.name = 'resnet'
# TODO: Call postprocess_output on the output of the model to create predictions to send back to the client
predictions = ???
```
# Creating the Input-Output Signature
**Exercise:** The final step to creating a servable model is to define the end-to-end input and output API. Edit the inputs and outputs parameters of predict_signature_def below to ensure that the signature correctly handles client requests. The inputs parameter should be a dictionary {'images': tensor_of_strings}, and the outputs parameter a dictionary {'classes': tensor_of_top_k_classes, 'probabilities': tensor_of_top_k_probs}.
```
# Create a saved model builder as an endpoint to dataflow execution
builder = saved_model_builder.SavedModelBuilder(SERVING_DIR)
# TODO: set the inputs and outputs parameters in predict_signature_def()
signature = predict_signature_def(inputs=???,
outputs=???)
```
# Export the Servable Model
```
with K.get_session() as sess:
builder.add_meta_graph_and_variables(sess=sess,
tags=[tag_constants.SERVING],
signature_def_map={'predict': signature})
builder.save()
```
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head()
```
## Player Count
* Display the total number of players
```
#creates a one-row dataframe holding the number of unique players (screen names)
#stores the number of total players in a variable for use later
total_players_df = pd.DataFrame({'Total Players':[len(purchase_data['SN'].unique())]})
total_players = len(purchase_data['SN'].unique())
total_players_df
```
## Purchasing Analysis (Total)
* Run basic calculations to obtain number of unique items, average price, etc.
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
#find the number of unique items, average price, number of purchases, and total revenue by applying functions to the respective columns of the purchase data
unique_items = len(purchase_data['Item ID'].unique())
average_price = purchase_data['Price'].mean()
number_of_purchases = purchase_data['Purchase ID'].max()+1 # assumes purchase IDs run from 0 to N-1; len(purchase_data) would give the same result
total_revenue = purchase_data['Price'].sum()
#making a data frame and formatting to display dollar amounts
purchasing_analysis = pd.DataFrame({'Number of Unique Items':[unique_items],'Average Price':average_price,'Number of Purchases':number_of_purchases,'Total Revenue':total_revenue})
purchasing_analysis['Average Price'] = purchasing_analysis['Average Price'].map("${:,.2f}".format)
purchasing_analysis['Total Revenue'] = purchasing_analysis['Total Revenue'].map("${:,.2f}".format)
purchasing_analysis
```
## Gender Demographics
* Percentage and Count of Male Players
* Percentage and Count of Female Players
* Percentage and Count of Other / Non-Disclosed
```
gender_groups = purchase_data.groupby('Gender')
gender_sn = pd.DataFrame(gender_groups['SN'].unique())
#counting females, males, other
female = len(gender_sn.loc['Female','SN'])
male = len(gender_sn.loc['Male','SN'])
other = len(gender_sn.loc['Other / Non-Disclosed','SN'])
#doing the math to find percentages
female_pct = female/total_players
male_pct = male/total_players
other_pct = other/total_players
#making and formatting the dataframe
gender_demographic = pd.DataFrame(index=['Male','Female','Other / Non-Disclosed'],data={'Total Count':[male,female,other],'Percentage of Players':[male_pct,female_pct,other_pct]})
gender_demographic['Percentage of Players']= gender_demographic['Percentage of Players'].map("{:.2%}".format)
gender_demographic
```
## Purchasing Analysis (Gender)
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
#creating dataframes for a count of purchases, average price, total price for each gender
gender_purchasing = pd.DataFrame(gender_groups['Purchase ID'].count())
gender_avg_price = pd.DataFrame(gender_groups['Price'].mean())
gender_total_vol = pd.DataFrame(gender_groups['Price'].sum())
gender_total_vol
#doing math to find totals for each gender, and average purchases per person
male_total = gender_total_vol.iloc[1,0]
avg_per_male = male_total/male
female_total = gender_total_vol.iloc[0,0]
avg_per_female = female_total/female
other_total =gender_total_vol.iloc[2,0]
avg_per_other = other_total/other
avg_per_other
# merging the above dataframes, and adding the average per person data
gender_analysis = pd.merge(gender_purchasing,gender_avg_price,on='Gender')
gender_analysis = pd.merge(gender_analysis,gender_total_vol,on='Gender')
gender_analysis = gender_analysis.rename(columns={'Purchase ID':'Purchase Count','Price_x':'Average Purchase Price','Price_y':'Total Purchase Volume'})
gender_analysis['Avg Total Purchase per Person'] = [avg_per_female,avg_per_male,avg_per_other]
gender_analysis
#formatting
gender_analysis['Average Purchase Price'] = gender_analysis['Average Purchase Price'].map("${:,.2f}".format)
gender_analysis['Total Purchase Volume'] = gender_analysis['Total Purchase Volume'].map("${:,.2f}".format)
gender_analysis['Avg Total Purchase per Person'] = gender_analysis['Avg Total Purchase per Person'].map("${:,.2f}".format)
gender_analysis
```
## Age Demographics
* Establish bins for ages
* Categorize the existing players using the age bins. Hint: use pd.cut()
* Calculate the numbers and percentages by age group
* Create a summary data frame to hold the results
* Optional: round the percentage column to two decimal points
* Display Age Demographics Table
```
#creating the bins and putting it into a dataframe
max_age = purchase_data['Age'].max()
bins= [0,9,14,19,24,29,34,39,max_age+1]
group_names = ['<10','10-14','15-19','20-24','25-29','30-34','35-39','40+']
purchase_data_agebins = purchase_data
purchase_data_agebins['Age Ranges'] = pd.cut(purchase_data_agebins['Age'], bins, labels=group_names, include_lowest=True)
purchase_data_agebins.head()
#grouping into the bins, creating a dataframe from the unique screen names per age range
age_groups = purchase_data_agebins.groupby('Age Ranges')
age_sn = pd.DataFrame(age_groups['SN'].unique())
age_demographic = age_sn.rename(columns={'SN':'Total Count'})
#using a for loop to store percentage of players in each age range into the dataframe
age_demographic['Percentage of Players']=[0,0,0,0,0,0,0,0]
for i in range(8):
num_in_range = len(age_demographic.iloc[i,0])
age_demographic.iloc[i,0]=num_in_range
age_demographic.iloc[i,1]=num_in_range/total_players
#formatting
age_demographic['Percentage of Players']=age_demographic['Percentage of Players'].map("{:.2%}".format)
age_demographic
```
## Purchasing Analysis (Age)
* Bin the purchase_data data frame by age
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
```
age_purchase = pd.DataFrame(age_groups['Purchase ID'].count())
age_avg_price = pd.DataFrame(age_groups['Price'].mean())
age_total_value = pd.DataFrame(age_groups['Price'].sum())
age_total_value
#storing the total average purchase per person for each age group into an array
age_range_total = []
age_unique = age_groups['SN'].unique()
for i in range(8):
people = len(age_unique.iloc[i])
total = age_total_value.iloc[i,0]
age_range_total.append(total/people)
age_range_total
#merging all the dataframes above together
age_analysis = pd.merge(age_purchase,age_avg_price,on='Age Ranges')
age_analysis = pd.merge(age_analysis,age_total_value,on='Age Ranges')
age_analysis['Avg Total Purchase per Person'] = age_range_total
age_analysis = age_analysis.rename(columns={'Purchase ID':'Purchase Count','Price_x':'Average Purchase Price','Price_y':'Total Purchase Volume'})
age_analysis
#formatting
age_analysis['Average Purchase Price']=age_analysis['Average Purchase Price'].map("${:,.2f}".format)
age_analysis['Total Purchase Volume']=age_analysis['Total Purchase Volume'].map("${:,.2f}".format)
age_analysis['Avg Total Purchase per Person']=age_analysis['Avg Total Purchase per Person'].map("${:,.2f}".format)
age_analysis
```
## Top Spenders
* Run basic calculations to obtain the results in the table below
* Create a summary data frame to hold the results
* Sort the total purchase value column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
#groupby screenname, making dataframes for the purchase count, avg purchase, total volume
sn_groups = purchase_data.groupby('SN')
sn_purchase = pd.DataFrame(sn_groups['Purchase ID'].count())
sn_avg_price = pd.DataFrame(sn_groups['Price'].mean())
sn_total_value = pd.DataFrame(sn_groups['Price'].sum())
#merging the dataframes together
sn_analysis = pd.merge(sn_purchase,sn_avg_price, on='SN')
sn_analysis = pd.merge(sn_analysis,sn_total_value,on='SN')
sn_analysis = sn_analysis.rename(columns={'Purchase ID':'Purchase Count','Price_x':'Average Purchase Price','Price_y':'Total Purchase Value'})
sn_analysis = sn_analysis.sort_values('Total Purchase Value',ascending=False)
#formatting
sn_analysis['Average Purchase Price'] = sn_analysis['Average Purchase Price'].map("${:.2f}".format)
sn_analysis['Total Purchase Value'] = sn_analysis['Total Purchase Value'].map("${:.2f}".format)
sn_analysis.head()
```
## Most Popular Items
* Retrieve the Item ID, Item Name, and Item Price columns
* Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value
* Create a summary data frame to hold the results
* Sort the purchase count column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
```
#getting just the columns we want
smaller_df = purchase_data[['Item ID','Item Name','Price']]
#grouping by item id and item name together
item_groups = smaller_df.groupby(['Item ID','Item Name'])
item_groups
#dataframes for purchase count, item price, total purchase value
items_count = pd.DataFrame(item_groups.count())
items_price = pd.DataFrame(item_groups['Price'].mean())
items_total = pd.DataFrame(item_groups['Price'].sum())
items_total
#merging and sorting on purchase counts
popular_items_analysis = pd.merge(items_count,items_price,on=['Item ID','Item Name'])
popular_items_analysis = pd.merge(popular_items_analysis,items_total,on=['Item ID','Item Name'])
popular_items_analysis = popular_items_analysis.rename(columns={'Price_x':'Purchase Count','Price_y':'Item Price','Price':'Total Purchase Value'})
popular_items_analysis_sorted_count = popular_items_analysis.sort_values('Purchase Count',ascending=False)
popular_items_analysis
#formatting
popular_items_analysis_sorted_count['Item Price'] = popular_items_analysis_sorted_count['Item Price'].map("${:.2f}".format)
popular_items_analysis_sorted_count['Total Purchase Value'] = popular_items_analysis_sorted_count['Total Purchase Value'].map("${:.2f}".format)
popular_items_analysis_sorted_count.head()
```
## Most Profitable Items
* Sort the above table by total purchase value in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the data frame
```
#sorting the previous dataframe on total purchase value
popular_items_analysis_sorted_total = popular_items_analysis.sort_values('Total Purchase Value',ascending=False)
#formatting
popular_items_analysis_sorted_total['Item Price'] = popular_items_analysis_sorted_total['Item Price'].map("${:.2f}".format)
popular_items_analysis_sorted_total['Total Purchase Value'] = popular_items_analysis_sorted_total['Total Purchase Value'].map("${:.2f}".format)
popular_items_analysis_sorted_total.head()
```

<center>https://xkcd.com/license.html (CC BY-NC 2.5)</center>
### What is web scraping?
---
Web scraping is a technique for extracting information from websites. This can be done *manually* but it is usually faster, more efficient and less error-prone to automate the task.
Web scraping allows you to acquire non-tabular or poorly structured data from websites and convert it into a usable, structured format, such as a .csv file or spreadsheet.
Scraping is about more than just acquiring data: it can also help you archive data and track changes to data online.
It is closely related to the practice of web indexing, which is what search engines like Google do when mass-analysing the Web to build their indices. But contrary to web indexing, which typically parses the entire content of a web page to make it searchable, web scraping targets specific information on the pages visited.
For example, online stores will often scour the publicly available pages of their competitors, scrape item prices, and then use this information to adjust their own prices. Another common practice is “contact scraping” in which personal information like email addresses or phone numbers is collected for marketing purposes.
### Why do we need it as a skill?
---
Web scraping is increasingly being used by academics and researchers to create data sets for text mining projects; these might be collections of journal articles or digitised texts. The practice of data journalism, in particular, relies on the ability of investigative journalists to harvest data that is not always presented or published in a form that allows analysis.
### When do we need scraping?
---
As useful as scraping is, there might be better options for the task. Choose the right (i.e. the easiest) tool for the job.
- Check whether or not you can easily copy and paste data from a site into Excel or Google Sheets. This might be quicker than scraping.
- Check if the site or service already provides an API to extract structured data (see the sketch after this list). If it does, that will be a much more efficient and effective pathway. Good examples are the Facebook API, the Twitter APIs or the YouTube comments API.
- For much larger needs, Freedom of information requests can be useful. Be specific about the formats required for the data you want.
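As a hedged illustration of the API route from the second bullet above, the sketch below uses the third-party `requests` package against a placeholder endpoint. The URL, parameters and JSON field names are purely hypothetical and only show the general shape of an API call that returns structured data:

```python
import requests  # assumes: pip install requests

# Hypothetical endpoint and parameters -- replace with a real, documented API.
url = "https://api.example.com/v1/companies"
params = {"industry": "retail", "page": 1}

response = requests.get(url, params=params, timeout=10)
response.raise_for_status()        # fail loudly on HTTP errors
data = response.json()             # already structured: no HTML parsing needed

for company in data.get("results", []):   # "results" is an assumed field name
    print(company.get("name"), company.get("revenue"))
```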
#### Challenge
---
If you had to gather data from a website which provides updated figures every 4 hours on an ongoing pandemic, would you:
- Check their terms of service
- Scrape the site directly
- Ask for permission and then scrape the site
- Use an official API (if it exists) that might have limitations
### Structured vs unstructured data
---
When presented with information, human beings are good at quickly categorizing it and extracting the data that they are interested in. For example, when we look at a magazine rack, provided the titles are written in a script that we are able to read, we can rapidly figure out the titles of the magazines, the stories they contain, the language they are written in, etc. and we can probably also easily organize them by topic, recognize those that are aimed at children, or even whether they lean toward a particular end of the political spectrum.
Computers have a much harder time making sense of such unstructured data unless we specifically tell them what the data is made of, for example by adding labels such as "this is the title of this magazine" or "this is a magazine about food". Data in which individual elements are separated and labelled is said to be structured.
We see that this data has been structured for displaying purposes (it is arranged in rows inside a table) but the different elements of information are not clearly labelled.
What if we wanted to download this dataset and, for example, compare the revenues of these companies against each other or the industry that they work in? We could try copy-pasting the entire table into a spreadsheet or even manually copy-pasting the names and websites in another document, but this can quickly become impractical when faced with a large set of data. What if we wanted to collect this information for all the companies that are there?
Fortunately, there are tools to automate at least part of the process. This technique is called web scraping. From Wikipedia,
> "*Web scraping (web harvesting or web data extraction) is a computer software technique of extracting information from websites.*"
Web scraping typically targets one web site at a time to extract unstructured information and put it in a structured form for reuse.
In this lesson, we will continue exploring the examples above and try different techniques to extract the information they contain. But before we launch into web scraping proper, we need to look a bit closer at how information is organized within an HTML document and how to build queries to access a specific subset of that information.
#### Challenge
---
Which of the following would you consider to be structured and unstructured data?
A.
```python
"The latest figures showed that webscraper INC saw a 120% increase in their revenue bringing their market cap to 2 Billion Dollars. This could be attributed to their new policies."
```
B.
```html
<company>
<name> webscraper INC</name>
<revenue> 120% </revenue>
<marketcap>2 billion </marketcap>
</company>
```
C.
```python
{
'company_name' : 'webscraper INC',
    'revenue_in_%' : 120,
'market_cap' : '2 billion USD'
}
```
---
#### What is HTML?
- HTML stands for **HyperText Markup Language**
- It is the standard markup language for creating the web pages that make up the web.
- An HTML document contains a series of elements that make up a web page; pages can link to one another, together forming a website.
- HTML elements are written as tags, which tell the web browser how to display the content.
A sample raw HTML file below :
```html
<!DOCTYPE html>
<html>
<head>
<title>My Title</title>
</head>
<body>
<h1>A Heading</h1>
<a href="#">Link text</a>
</body>
</html>
```
A webpage is simply a document. Every HTML element within this document tells the web browser to display a specific piece of content. The following image shows the HTML code and the webpage generated (please refer to `intro_html_example.html`).

#### What is XML?
- XML stands for **eXtensible Markup Language**
- XML is a markup language much like HTML
- XML was designed to store and transport data
- XML was designed to be self-descriptive
```xml
<note>
<date>2015-09-01</date>
<hour>08:30</hour>
<to>Tove</to>
<from>Jani</from>
<body>Don't forget me this weekend!</body>
</note>
```
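Because XML is designed to store and transport data, reading it back programmatically is straightforward. Below is a minimal sketch using Python's standard-library `xml.etree.ElementTree`; it is shown only for illustration and is not a tool the rest of the lesson depends on:

```python
import xml.etree.ElementTree as ET

note_xml = """
<note>
  <date>2015-09-01</date>
  <hour>08:30</hour>
  <to>Tove</to>
  <from>Jani</from>
  <body>Don't forget me this weekend!</body>
</note>
"""

# Parse the string into an element tree and read individual, labelled elements
root = ET.fromstring(note_xml)
print(root.find("to").text)    # -> Tove
print(root.find("body").text)  # -> Don't forget me this weekend!
```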
### HTML DOM (or Document Object Model)
---
From the World Wide Web Consortium (W3C),
> "*The W3C Document Object Model (DOM) is a platform and language-neutral interface that allows programs and scripts to dynamically access and update the content, structure, and style of a document.*"
Every time a web page is loaded in the browser, it creates a **D**ocument **O**bject **M**odel of the page. It essentially treats the HTML (or XML) document as a tree structure in which the different HTML elements are represented as nodes and objects.
More broadly, it is a programming interface for HTML and XML documents and can be considered as the object-oriented representation of a web page which can be modified with a scripting language like JavaScript.
It also provides us with a rich visual representation of how the different elements interact and informs us about their relative position within the tree. This helps us find and target crucial **tags**, **ids** or **classes** within the document and extract their contents. To summarize, the DOM is a standard which allows us to:
- **get**
- **change**
- **add**, or
- **delete**
HTML elements. Here we will be primarily interested in accessing and getting the data, as opposed to manipulating the document itself.
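As a small taste of what "getting" data looks like in code, here is a minimal sketch that parses the sample HTML from earlier using the third-party `beautifulsoup4` package. This is an assumption made only for illustration; the scraping tools used later in the lesson may differ:

```python
from bs4 import BeautifulSoup  # assumes: pip install beautifulsoup4

sample_html = """
<!DOCTYPE html>
<html>
  <head><title>My Title</title></head>
  <body>
    <h1>A Heading</h1>
    <a href="#">Link text</a>
  </body>
</html>
"""

soup = BeautifulSoup(sample_html, "html.parser")
print(soup.title.text)   # -> My Title
print(soup.h1.text)      # -> A Heading
print(soup.a["href"])    # -> #
```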
Let's look at the DOM for the HTML from our previous example below

The next question, then, is: how do we access the source code or DOM of **any** web page on the internet?
#### DOM inspector and `F12` to the rescue!
To inspect individual elements within a web page, we can simply use the DOM inspector (or its variants) that comes with every browser.
- The easiest way to access the source code of any web page is through the developer console, by pressing **F12**
- Alternatively, we can right-click on a specific element in the webpage and select **inspect** or **inspect element** from the dropdown. This is especially useful in cases where we want to target a specific piece of data present within some HTML element.
- It helps highlight different attributes, properties and styles within the HTML
- It is known as the **DOM Inspector** in Firefox and **Developer Tools** in Chrome.
> Note : Some webpages prohibit right-click and in those cases we might have to resort to inspecting the source code via F12.
A Google Chrome window along with the developer console accessed through **F12** (found under **Developer Tools**) is shown below

### References
- https://xkcd.com/2054/
- https://developer.mozilla.org/en-US/docs/Web/API/Document_Object_Model/Introduction
- https://en.wikipedia.org/wiki/Document_Object_Model
- https://www.w3schools.com/html/
- https://www.w3schools.com/js/js_htmldom.asp
```
%matplotlib inline
```
Source localization with MNE/dSPM/sLORETA
=========================================
The aim of this tutorial is to teach you how to compute and apply a linear
inverse method such as MNE/dSPM/sLORETA on evoked/raw/epochs data.
```
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
```
Process MEG data
```
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False)
raw.set_eeg_reference() # set EEG average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_r=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
exclude='bads')
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=baseline, reject=reject, add_eeg_ref=False)
```
Compute regularized noise covariance
------------------------------------
For more details see `tut_compute_covariance`.
```
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'])
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
```
Compute the evoked response
---------------------------
```
evoked = epochs.average()
evoked.plot()
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
# Show whitening
evoked.plot_white(noise_cov)
```
Inverse modeling: MNE/dSPM on evoked and raw data
-------------------------------------------------
```
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Restrict forward solution as necessary for MEG
fwd = mne.pick_types_forward(fwd, meg=True, eeg=False)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
inverse_operator)
```
Compute inverse solution
------------------------
```
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None)
del fwd, inverse_operator, epochs # to save memory
```
Visualization
-------------
View activation time-series
```
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
```
Here we use the peak getter to move the visualization to the time point of the peak
and draw a marker at the maximum peak vertex.
```
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]),
initial_time=time_max, time_unit='s')
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.show_view('lateral')
```
Morph data to average brain
---------------------------
```
fs_vertices = [np.arange(10242)] * 2
morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertices,
fs_vertices, smooth=None,
subjects_dir=subjects_dir)
stc_fsaverage = stc.morph_precomputed('fsaverage', fs_vertices, morph_mat)
brain_fsaverage = stc_fsaverage.plot(surface='inflated', hemi='rh',
subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]),
initial_time=time_max, time_unit='s')
brain_fsaverage.show_view('lateral')
```
Exercise
--------
 - By changing the `method` parameter to `'sLORETA'`, recompute the source
   estimates using the sLORETA method.
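A minimal sketch of this exercise is shown below. It assumes the `del fwd, inverse_operator, epochs` line above was skipped (or the inverse operator was recreated), since `apply_inverse` still needs it; everything else reuses objects defined earlier in the tutorial.

```
# Recompute the source estimate with sLORETA instead of dSPM
# (assumes `inverse_operator` is still in memory, i.e. it was not deleted above)
method = "sLORETA"
stc_sloreta = apply_inverse(evoked, inverse_operator, lambda2,
                            method=method, pick_ori=None)

plt.plot(1e3 * stc_sloreta.times, stc_sloreta.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
```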
# Scapy
## basic
**Import scapy**
```
from scapy.all import *
```
**How to build packets**
```
IP()
IP(dst="10.0.2.4")
```
**A packet composed of two layers (IP - layer 3, TCP - layer 4). The / operator is used to stack layers.**
```
p = IP(dst="10.0.2.4")/TCP()/"Hello world!"
p
hexdump(p)
```
**Let's move to the second layer (Ethernet) for a while:**
```
p = Ether()/IP(dst="10.0.2.4")/TCP()/"Hello world!"
p
ls(p)
str(p)
p.show()
```
**How to modify packets:**
```
p[IP].src
p[IP].src='10.0.2.99'
p.show()
```
## sending and receiving packets - ICMP example
**Create an IP packet with the ICMP protocol**
* dst - the destination address, or addresses (note that the example below uses a range from 1 to 10)
* timeout - how long, in seconds, scapy should wait after the last packet is sent
* "Hello world" - packet payload
**The response will be a tuple with two elements:**
* answered packets
* unanswered packets
**To just send a packet, use the send command**
```
send(IP(dst="10.0.2.5")/ICMP()/"Hello world!")
```
**To send a packet and receive a single response, use the sr1 method**
```
ans = sr1(IP(dst="10.0.2.4")/ICMP()/"Hello world!")
ans.summary()
ans.show()
```
**To send packets and receive all responses, use the sr method**
```
ans,unans=sr(IP(dst="10.0.2.1-10")/ICMP()/"Hello world!",timeout=2)
ans.summary()
```
**Let's make some changes to the request:**
* src - the source address
* ttl - time to live
```
ans,unans=sr(IP(src="10.0.2.2",dst="10.0.2.4",ttl=128)/ICMP()/"Hello world!",timeout=2)
ans.summary()
```
**The source address has been changed to another machine, thus we do not receive any response - guess who received it ;)**
**Let's display all unanswered packets.**
```
unans.summary()
```
**Not enough? OK, we can also send a ping reply on behalf of 10.0.2.4**
**ICMP type 0 means echo reply; let's create such a packet**
```
send(IP(src="10.0.2.4", dst="10.0.2.2", ttl=128)/ICMP(type=0)/"HelloWorld")
```
## Simple port scanner
**Let's create a basic TCP/IP packet**
```
ans,unans = sr(IP(dst="10.0.2.4")/TCP(dport=23))
ans.summary()
ans,unans = sr(IP(dst="10.0.2.4")/TCP(dport=678))
ans.summary()
```
**Please note the response flags (a small sketch for classifying them follows below):**
* port 23 - response is SA which means SYN-ACK (port open)
* port 678 - response is RA which means RST-ACK (port closed)
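A minimal sketch of turning those flags into an open/closed verdict, assuming `ans` comes from one of the `sr()` calls above (0x12 is SYN+ACK and 0x14 is RST+ACK):

```
# Classify scanned ports by the TCP flags in each reply
for sent, received in ans:
    if received.haslayer(TCP):
        if received[TCP].flags == 0x12:      # SYN+ACK -> port open
            print('port {} open'.format(sent[TCP].dport))
        elif received[TCP].flags == 0x14:    # RST+ACK -> port closed
            print('port {} closed'.format(sent[TCP].dport))
```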
**Would you like more control over the sent packet? No problem:**
* sport - source port
* dport - list of ports instead of one port!
* flags - request flags (here S - SYN)
```
ans,unans = sr(IP(dst="10.0.2.4")/TCP(sport=777,dport=[23,80,10000],flags="S"))
ans.summary()
```
**OK, maybe it is worth randomizing the source port and adding some retries?**
* sport - RandShort(), a random source port
* flags - S = SYN
* inter - time interval between two packets
* retry - number of retries
* timeout - how long scapy should wait after the last packet is sent
```
ans,unans = sr(IP(dst="10.0.2.4")/TCP(sport=RandShort(),dport=80,flags="S"),inter=0.5,retry=2,timeout=1)
ans.summary()
```
## ARP ping
```
arping("10.0.2.*")
```
## TCP ping
```
ans,unans=sr(IP(dst="10.0.2.0-10")/TCP(dport=80, flags="S"),timeout=4)
ans.summary()
```
## UDP ping
```
ans,unans=sr(IP(dst="10.0.2.0-10")/UDP(dport=1))
ans.summary()
```
## Traceroute
```
traceroute(["www.google.com"], maxttl=20)
```
## DNS query
**First of all, check what can be set for a DNS question record**
```
DNSQR().show()
DNS().show()
```
**DNS runs over UDP, so create an IP()/UDP()/DNS() packet**
```
ans = sr1(IP(dst="8.8.8.8")/UDP()/DNS(rd=1,qd=DNSQR(qname="www.google.com")),timeout=5)
ans.show()
```
## Sniffing
```
def printPacket(p):
destAddress = p[IP].dst
sourceAddress = p[IP].src
load = ''
if Raw in p:
load = p[Raw].load
print('{} -> {}:{}'.format(sourceAddress, destAddress, load))
sniff(filter='tcp and port 21',prn=printPacket,count=10)
```
## Writing packets to a pcap file
```
def writePacket(p):
wrpcap('scapy_example.pcap',p,append=True)
sniff(filter='tcp and port 21',prn=writePacket,count=10)
```
# Large-Scale Stochastic Variational GP Regression in 1D (w/ KISS-GP)
## Introduction
This example shows how to perform GP regression, but using **variational inference** rather than exact inference. There are a few cases where variational inference may be preferable:
1) If you have lots of data, and want to perform **stochastic optimization**
2) If you have a model where you want to use other variational distributions
KISS-GP with SVI was introduced in:
https://papers.nips.cc/paper/6426-stochastic-variational-deep-kernel-learning.pdf
```
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
%matplotlib inline
# Create a training set
# We're going to learn a sine function
train_x = torch.linspace(0, 1, 1000)
train_y = torch.sin(train_x * (4 * math.pi)) + torch.randn(train_x.size()) * 0.2
```
## Performing SGD - the dataloader
Because we want to do stochastic optimization, we have to put the dataset in a pytorch **DataLoader**.
This creates easy minibatches of the data
```
from torch.utils.data import TensorDataset, DataLoader
train_dataset = TensorDataset(train_x, train_y)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
```
## The model
This is pretty similar to a normal regression model, except now we're using a `gpytorch.models.GridInducingVariationalGP` instead of a `gpytorch.models.ExactGP`.
Any of the variational models would work. We're using the `GridInducingVariationalGP` because we have many data points, but only 1 dimensional data.
Similar to exact regression, we use a `GaussianLikelihood`.
```
class GPRegressionModel(gpytorch.models.GridInducingVariationalGP):
def __init__(self):
super(GPRegressionModel, self).__init__(grid_size=20, grid_bounds=[(-0.05, 1.05)])
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(
math.exp(-3), math.exp(6), sigma=0.1, transform=torch.exp
)
)
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
model = GPRegressionModel()
likelihood = gpytorch.likelihoods.GaussianLikelihood()
```
## The training loop
This training loop will use **stochastic optimization** rather than batch optimization
```
model.train()
likelihood.train()
optimizer = torch.optim.Adam([
{'params': model.parameters()},
{'params': likelihood.parameters()},
], lr=0.01)
# Our loss object
# We're using the VariationalMarginalLogLikelihood object
mll = gpytorch.mlls.VariationalMarginalLogLikelihood(likelihood, model, num_data=train_y.numel())
# The training loop
def train(n_epochs=20):
# We use a Learning rate scheduler from PyTorch to lower the learning rate during optimization
# We're going to drop the learning rate by 1/10 after 3/4 of training
# This helps the model converge to a minimum
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[0.75 * n_epochs], gamma=0.1)
for i in range(n_epochs):
scheduler.step()
# Within each iteration, we will go over each minibatch of data
for x_batch, y_batch in train_loader:
optimizer.zero_grad()
output = model(x_batch)
loss = -mll(output, y_batch)
# The actual optimization step
loss.backward()
optimizer.step()
print('Epoch %d/%d - Loss: %.3f' % (i + 1, n_epochs, loss.item()))
%time train()
```
## Testing the model
```
model.eval()
likelihood.eval()
test_x = torch.linspace(0, 1, 51)
test_y = torch.sin(test_x * (4 * math.pi))
with torch.no_grad():
observed_pred = likelihood(model(test_x))
lower, upper = observed_pred.confidence_region()
fig, ax = plt.subplots(1, 1, figsize=(4, 3))
ax.plot(test_x.detach().cpu().numpy(), test_y.detach().cpu().numpy(), 'k*')
ax.plot(test_x.detach().cpu().numpy(), observed_pred.mean.detach().cpu().numpy(), 'b')
ax.fill_between(test_x.detach().cpu().numpy(), lower.detach().cpu().numpy(), upper.detach().cpu().numpy(), alpha=0.5)
ax.set_ylim([-3, 3])
ax.legend(['Observed Data', 'Mean', 'Confidence'])
```
```
import pandas
import matplotlib as mpl
import xarray as xr
import numpy as np
import datetime as dt
import os.path
dir_data='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/'
dir_data_clim='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/clim/'
def sss_filename(d):
dir_data='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem=syr + '/'+ 'mercatorglorys12v1_gl12_mean_' + syr + smon + '.nc'
filename = dir_data + fname_tem
return filename
def sss_filename_new(d):
dir_data='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem=syr + '/'+ 'subset_mercatorglorys12v1_gl12_mean_' + syr + smon + '.nc'
filename = dir_data + fname_tem
return filename
def sss_filename_new_yearly(d):
dir_data='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem=syr + '/'+ 'year_subset_mercatorglorys12v1_gl12_mean_' + syr + '.nc'
filename = dir_data + fname_tem
return filename
def sss_filename_new_monthly(d):
dir_data='F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem=syr + '/'+ 'subset_mercatorglorys12v1_gl12_mean_' + syr + smon + '.nc'
filename = dir_data + fname_tem
return filename
def sss_nrt_filename(d):
dir_data='F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
d2 = d + dt.timedelta(days = +2)
syr2, smon2, sdym2 =str(d2.year).zfill(4), str(d2.month).zfill(2), str(d2.day).zfill(2)
fname_tem = 'metoffice_coupled_orca025_GL4_SAL_b' + syr2 + smon2 + sdym2 + '_dm' + syr + smon + sdym + '.nc'
filename = dir_data + fname_tem
return filename
def ssh_nrt_filename(d):
dir_data='F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
d2 = d + dt.timedelta(days = +2)
syr2, smon2, sdym2 =str(d2.year).zfill(4), str(d2.month).zfill(2), str(d2.day).zfill(2)
fname_tem = 'metoffice_coupled_orca025_GL4_SSH_b' + syr2 + smon2 + sdym2 + '_dm' + syr + smon + sdym + '.nc'
filename = dir_data + fname_tem
return filename
def sss_nrt_filename_new(d):
dir_data='F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem = 'monthly/monthly_subset_metoffice_coupled_orca025_GL4_SAL_b' + syr + smon + '_dm20180208.nc'
filename = dir_data + fname_tem
return filename
def sss_nrt_filename_new_monthly(d):
dir_data='F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem = 'monthly/subset_metoffice_coupled_orca025_GL4_SAL_b' + syr + smon+ '_dm20180208.nc'
filename = dir_data + fname_tem
return filename
def sss_nrt_filename_new_yearly(d):
dir_data='F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/'
syr, smon, sdym =str(d.year).zfill(4), str(d.month).zfill(2), str(d.day).zfill(2)
fname_tem = 'monthly/year_subset_metoffice_coupled_orca025_GL4_SAL_b' + syr + '_dm20180208.nc'
filename = dir_data + fname_tem
return filename
#get 0.2 deg grid from cmc data to downsample onto
filename = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/1994/002/19940102120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
ds_v2 = xr.open_dataset(filename)
new_lat = np.linspace(ds_v2.lat[0], ds_v2.lat[-1], ds_v2.dims['lat'])
new_lon = np.linspace(ds_v2.lon[0], ds_v2.lon[-1], ds_v2.dims['lon'])
ds_v2.close()
#MONTHLY files are huge because of depth layers, so lower spatial resolution and only save surface data
for lyr in range(2018,2019): #2017):
for imon in range(1,13): #3,13):
d = dt.date(lyr,imon,1)
filename = sss_filename(d)
if os.path.exists(filename):
ds = xr.open_dataset(filename)
ds_subset = ds.sel(depth = ds.depth[0].data)
ds_low_res = ds_subset.interp(latitude = new_lat,longitude = new_lon)
ds.close()
filename_new = sss_filename_new(d)
ds_low_res.to_netcdf(filename_new)
os.remove(filename)
lyr, idyjl = 2018, 1
d = dt.date(lyr,1,1) + dt.timedelta(idyjl - 1)
filename = sss_nrt_filename(d)
print(filename)
ds = xr.open_dataset(filename)
ds.close()
ds = ds.mean('depth')
ds
filename = ssh_nrt_filename(d)
print(filename)
ds2 = xr.open_dataset(filename)
ds2.close()
ds2
ds['zos']=ds2['zos']
ds
#Daily NRT files are huge because of depth layers, so lower spatial resolution and only save surface data
#also put into monthly file
file1 = 'F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/2017/year_subset_mercatorglorys12v1_gl12_mean_2017.nc'
ds1 = xr.open_dataset(file1)
ds1.close()
for lyr in range(2018,2019):
ds_mnth=[]
for imon in range(1,13): #:13):
init = 0
for idyjl in range(1,366):
d = dt.date(lyr,1,1) + dt.timedelta(idyjl - 1)
dd=dt.datetime(lyr,1,1) + dt.timedelta(idyjl - 1)
if d.month!=imon:
continue
filename = sss_nrt_filename(d)
ds = xr.open_dataset(filename)
ds = ds.drop('time').mean('time')
ds = ds.assign_coords(lon=(((ds.lon + 180) % 360) - 180)).sortby('lon').sortby('lat')
#ds['time']=dd
ds.assign_coords(time=dd)
ds.expand_dims('time')
ds_subset = ds.sel(depth = ds.depth[0].data)
ds.close()
filename = ssh_nrt_filename(d)
ds_tem = xr.open_dataset(filename)
ds_tem = ds_tem.drop('time').mean('time')
ds_tem = ds_tem.assign_coords(lon=(((ds_tem.lon + 180) % 360) - 180)).sortby('lon').sortby('lat')
#ds_tem['time']=dd
ds_tem.assign_coords(time=dd)
ds_tem.expand_dims('time')
ds_subset['zos']=ds_tem['zos']
ds_tem.close()
ds_low_res = ds_subset.interp(lat = new_lat,lon = new_lon)
print(ds_low_res)
if init==0:
ds_sum = ds_low_res
init = 1
else:
ds_sum = xr.concat([ds_sum,ds_low_res],dim = 'time')
print(idyjl,ds_sum.dims)
#ds_clim2 = ds_sum.resample(time='M').mean()
ds_clim2 = ds_sum.mean('time',keep_attrs=True)
#ds_clim2.assign_coords(time=dd.month)
ds_clim2.expand_dims('time',0)
dd=dt.datetime(lyr,imon,1)
ds_clim2.coords['time']=ds1.time[imon-1].values+np.timedelta64(365,'D')
# ds_clim2 = ds_sum.groupby('time.month').mean('time')
#ds_sum = ds_sum.mean('time',skipna=True)
ds_mnth.append(ds_clim2)
d = dt.date(lyr,imon,1)
filename_month = sss_nrt_filename_new_monthly(d)
print('out:',filename_month)
ds_clim2.to_netcdf(filename_month)
combined = xr.concat(ds_mnth, dim='time')
filename_new = sss_nrt_filename_new_yearly(d)
combined.to_netcdf(filename_new)
d = dt.date(2018,12,1)
fname = sss_nrt_filename_new_yearly(d)
print(fname)
ds = xr.open_dataset(fname)
d = dt.date(2017,12,1)
print(fname)
fname = sss_filename_new_yearly(d)
ds2 = xr.open_dataset(fname)
ds
ds2
file1 = 'F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/2017/year_subset_mercatorglorys12v1_gl12_mean_2017.nc'
ds1 = xr.open_dataset(file1)
ds1.close()
print(ds1.time[0])
print(ds1.time[0].values+np.timedelta64(365,'D'))
print(filename_new)
print(combined)
combined.so[0,:,:].plot()
#make monthly average ssts into one yearly file
for lyr in range(2017,2018): #2017):
ds_mnth=[]
for imon in range(1,13):
d = dt.date(lyr,imon,1)
filename = sss_filename_new(d)
filename_new = sss_filename_new_yearly(d)
ds = xr.open_dataset(filename)
ds.close()
ds_mnth.append(ds)
combined = xr.concat(ds_mnth, dim='time')
combined = combined.rename({'longitude':'lon','latitude':'lat'})
combined.to_netcdf(filename_new)
#test nrt and reanalysis yearly files
#file1 = 'F:/data/model_data/CMEM/global-reanalysis-phy-001-030-monthly/1993/year_subset_mercatorglorys12v1_gl12_mean_1993.nc'
file2 = 'F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/monthly/year_subset_metoffice_coupled_orca025_GL4_SAL_b2018_dm20180208.nc'
file2a = 'F:/data/model_data/CMEM/global-analysis-forecast-phys_001_015/monthly/year_subset_metoffice_coupled_orca025_GL4_SAL_b2018_dm20180208a.nc'
#ds1 = xr.open_dataset(file1)
ds2 = xr.open_dataset(file2)
#ds2['time']=ds2.time.values+np.timedelta64(365,'D')
#ds1.close()
ds2.close()
#print(ds2)
#print(ds1)
print(ds2)
ds2.to_netcdf(file2a)
#print(dt.timedelta(days=25))
#print(ds1.time[0]+dt.timedelta(days=25))
#d=ds1['time'].values
#d[0].time
ds2
print(ds2.time[0].values,ds2.time[0].values+np.timedelta64(365,'D'))
ds2
ds1.so[0,:,:].plot(vmin=30,vmax=35)
ds2.so[0,:,:].plot(vmin=30,vmax=35)
#calculate climatology
for icase in range(0,3):
if icase==0:
iyr1,iyr2 = 1993,2000
if icase==1:
iyr1,iyr2 = 2000,2010
if icase==2:
iyr1,iyr2 = 2010,2019
init = 0
for lyr in range(iyr1,iyr2):
d = dt.date(lyr,1,1)
filename = sss_filename_new_yearly(d)
if lyr == 2018:
filename = sss_nrt_filename_new_yearly(d)
ds = xr.open_dataset(filename,drop_variables={'mlotst','bottomT','sithick','siconc','usi','vsi','thetao','uo','vo'})
if init==0:
ds_sum = ds
init = 1
else:
ds_sum = xr.concat([ds_sum,ds],dim = 'time')
print(lyr,ds_sum.dims)
ds_sum2 = ds_sum.groupby('time.month').mean('time',keep_attrs=True)
fname_tem=dir_data_clim + 'climatology_'+str(iyr1)+'_'+str(iyr2-1)+'_mercatorglorys12v1_gl12_mean.nc'
ds_sum2.to_netcdf(fname_tem)
num_year = 2018-1993+1
num_year_file1 = 1999 - 1993 + 1
num_year_file2 = 2009 - 2000 + 1
num_year_file3 = 2018 - 2010 + 1
frac_file1 = num_year_file1 / num_year
frac_file2 = num_year_file2 / num_year
frac_file3 = num_year_file3 / num_year
print(frac_file1+frac_file2+frac_file3,frac_file1,frac_file2,frac_file3)
fname_tem = dir_data_clim + 'climatology_1993_1999_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds = xr.open_dataset(fname_tem)
ds.close()
fname_tem = dir_data_clim + 'climatology_2000_2009_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds2 = xr.open_dataset(fname_tem)
ds2.close()
fname_tem = dir_data_clim + 'climatology_2010_2018_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds3 = xr.open_dataset(fname_tem)
ds3.close()
ds_ave = frac_file1*ds + frac_file2*ds2 + frac_file3*ds3
fname_tem = dir_data_clim + 'climatology_1993_2018_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds_ave.to_netcdf(fname_tem)
num_year = 2018-2000+1
#num_year_file1 = 1999 - 1993 + 1
num_year_file2 = 2009 - 2000 + 1
num_year_file3 = 2018 - 2010 + 1
#frac_file1 = num_year_file1 / num_year
frac_file2 = num_year_file2 / num_year
frac_file3 = num_year_file3 / num_year
print(frac_file2+frac_file3,frac_file2,frac_file3)
#fname_tem = dir_data_clim + 'climatology_1993_1999_mercatorglorys12v1_gl12_mean.nc'
#print(fname_tem)
#ds = xr.open_dataset(fname_tem)
#ds.close()
fname_tem = dir_data_clim + 'climatology_2000_2009_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds2 = xr.open_dataset(fname_tem)
ds2.close()
fname_tem = dir_data_clim + 'climatology_2010_2018_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds3 = xr.open_dataset(fname_tem)
ds3.close()
print(frac_file2+frac_file3,frac_file2,frac_file3)
ds_ave = frac_file2*ds2 + frac_file3*ds3
fname_tem = dir_data_clim + 'climatology_2000_2018_mercatorglorys12v1_gl12_mean.nc'
print(fname_tem)
ds_ave.to_netcdf(fname_tem)
ds_ave
#put clim together into one
fname_tem='climatology_monthly_1993_1998_mercatorglorys12v1_gl12_mean.nc'
filename = dir_data_clim + fname_tem
ds = xr.open_dataset(filename)
ds_sum = ds
ds.close()
fname_tem='climatology_monthly_1999_2004_mercatorglorys12v1_gl12_mean.nc'
filename = dir_data_clim + fname_tem
ds = xr.open_dataset(filename)
ds_sum = xr.concat([ds_sum,ds],dim = 'month')
ds.close()
fname_tem='climatology_monthly_2005_2010_mercatorglorys12v1_gl12_mean.nc'
filename = dir_data_clim + fname_tem
ds = xr.open_dataset(filename)
ds_sum = xr.concat([ds_sum,ds],dim = 'month')
ds.close()
fname_tem='climatology_monthly_2011_2017_mercatorglorys12v1_gl12_mean.nc'
filename = dir_data_clim + fname_tem
ds = xr.open_dataset(filename)
ds_sum = xr.concat([ds_sum,ds],dim = 'month')
ds.close()
ds_sum2 = ds_sum.groupby('month').mean('month')
#ds_sum2.rename({'longitude':'lon','latitude':'lat'}, inplace = True)
fname_tem='climatology_monthly_1993_2017_mercatorglorys12v1_gl12_mean.nc'
filename_out = dir_data_clim + fname_tem
ds_sum2.to_netcdf(filename_out)
ds_sum2
#change clim to lat and lon rather than latitude and longitude
fname_tem='climatology_monthly_1993_2016_mercatorglorys12v1_gl12_mean.nc'
filename_out = dir_data_clim + fname_tem
ds=xr.open_dataset(filename_out)
#ds.rename({'longitude':'lon','latitude':'lat'}, inplace = True)
print(ds)
#ds.to_netcdf(filename_out)
ds.close()
ds_sum
```
# Exploratory Data Analysis of the Titanic passengers
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
```
## 1 - Importing and understanding the data
```
df = pd.read_csv('train.csv')
df.head()
```
Explaining the variables:
* PassengerId: a passenger ID
* Survived: whether the passenger survived the disaster (1 - survived, 0 - did not survive)
* Pclass: ticket class (1st, 2nd or 3rd)
* Name: passenger name
* Sex: passenger sex
* Age: age
* SibSp: for children, the number of siblings aboard the ship; for adults, the number of spouses aboard the ship
* Parch: for children, the number of parents aboard the ship; for adults, the number of children aboard the ship.
* Ticket: ticket number
* Fare: ticket fare
* Cabin: cabin number
* Embarked: port of embarkation
We will work only with the variables **PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch**
```
df = df[['PassengerId', 'Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch']].set_index('PassengerId')
df.head()
```
## 2 - Handling missing data
```
df.count()
```
We can see that some instances have missing ages.
There are two possibilities:
1. Drop the instances whose age is missing
2. Try to keep those instances by filling in the age with a sensible value
I will go with the second strategy.
I could simply replace the missing ages of women with the mean age of all women, and apply the same reasoning to men. However, I believe it is possible to do better than that. I will use all the variables to impute the ages.
Note that **Survived, Pclass and Sex** are categorical, but **SibSp and Parch** are not, so we need to categorize them by splitting them into buckets.
Note that if **SibSp** > 1, there is a strong indication that the individual is a child, since an adult cannot have more than one spouse aboard. I will therefore split **SibSp** into the following categories: (0, 1, >1)
If **Parch** > 2, there is a strong indication that the individual is an adult, since a child cannot have more than 2 parents. I will split **Parch** into: (0, 1-2, >2)
```
df['SibSp_cat'] = df['SibSp'].apply(lambda x: str(x) if x <= 1 else '>1').astype('category')
def parch_cat(dt):
if dt == 1 or dt == 2:
return '1-2'
elif dt > 2:
return '>2'
return str(dt)
df['Parch_cat'] = df['Parch'].apply(parch_cat).astype('category')
```
Let's now impute the ages with the mean of each group
```
def preencher(dt):
dt['Age'] = dt['Age'].fillna(dt['Age'].mean())
return dt
df2 = df.groupby(['Survived', 'Pclass', 'Sex', 'SibSp_cat', 'Parch_cat'], group_keys=False).apply(preencher)
df2.count()
```
After this process, there is still one instance with missing data
```
df2[df2.Age.isnull()]
```
For the reasons explained above, this instance corresponds to a child, but since **Parch** is 0 he was not travelling with his parents. He was probably travelling with a nanny, or one of his two siblings was responsible for him. Since this case is rare, we did not have similar data to estimate the boy's age in the previous step.
I will estimate it with the mean age of male survivors aged 14 or under in 3rd class
```
df2.loc[302, 'Age'] = int(df2['Age'][(df2.Sex == 'male') & (df2.Survived==1) & (df2.Age <= 14) & (df2.Pclass==3)].mean())
```
With the missing data handled, we can move on to the analysis
## 3 - Analysis
The main focus of the analysis will be how the variables affect the probability of survival. To start, let's do a univariate analysis to better understand the dataset.
```
fig = plt.figure(figsize=(15,4))
axe = fig.subplots(1,4)
_ = df2.groupby('Sex',group_keys=False).size().reset_index()
sns.barplot(x='Sex', y=0, ax=axe[0],data=_)
axe[0].set_ylabel('')
_ = df2.groupby('Pclass',group_keys=False).size().reset_index()
sns.barplot(x='Pclass', y=0, ax=axe[1],data=_)
axe[1].set_ylabel('')
_ = df2.groupby('Pclass',group_keys=False).size().reset_index()
sns.barplot(x='Pclass', y=0, ax=axe[1],data=_)
axe[1].set_ylabel('')
_ = df2.groupby('Survived',group_keys=False).size().reset_index()
sns.barplot(x='Survived', y=0, ax=axe[2],data=_)
axe[2].set_ylabel('')
axe[2].set_xticklabels(['Não', 'Sim'])
sns.kdeplot(x='Age', ax=axe[3],data=df2, fill=True)
axe[3].set_ylabel('')
```
Let's turn the **Age** variable into a categorical one to make the analysis easier. The classes will be split so that each contains the same number of individuals.
```
df2['Age_cat'] = pd.qcut(df2['Age'], 4, precision=0)
```
To analyze the effect of a variable on the probability of survival we need to hold the other variables constant and vary only the variable of interest.
### 3.1 - How do the individual's sex and ticket class influence the probability of survival?
We cannot simply look at which sex has the highest percentage of survivors, because other variables may be interfering. By analyzing only the **Sex** variable we do not know how much of the difference in survival probability is due to the other variables.
Since the variables are categorical, to capture the real effect of **Sex** we need to analyze within groups in which the other variables remain constant.
```
df3 = df2.groupby(['Pclass', 'Age_cat', 'Sex'], as_index=False)['Survived'].mean()
df3['Survived'] = df3['Survived']*100
df3.head()
sns.catplot(x='Pclass', y='Survived', hue='Sex', col='Age_cat', kind='point',data=df3)
```
#### Findings:
* In every case women have a higher chance of survival than men. This result is expected, since it is well known that women and children are given rescue priority in emergencies.
* The percentage of survivors generally drops in the lower classes. Class is a proxy variable for economic status: first-class individuals are probably the richest on the ship and third-class individuals the poorest.
### 3.2 - How does the individual's age influence the probability of survival?
Using the same data frame as the previous item, only changing the axis.
```
sns.catplot(x='Age_cat', y='Survived', hue='Sex', col='Pclass',kind='point', data=df3)
```
With this plot the effect of age is not very clear, perhaps because age was categorized. Let's try analyzing it as a continuous variable using a boxplot.
```
df_t = df2.copy()
df_t['Survived'] = df_t['Survived'].apply(lambda x: ['Não', 'Sim'][x])
sns.catplot(x='Survived', y='Age', col='Pclass', row='Sex',kind='box', data=df_t)
```
The median age of survivors is generally lower, but this pattern breaks in the group of first-class women. Let's investigate further by checking the number of individuals in each group.
```
df2.groupby(['Pclass', 'Sex', 'Survived']).size()
```
Note that in classes 1 and 2 the number of women who did not survive is very small (3 and 6, respectively). With samples this small it is hard to draw conclusions about the age distribution in those groups. For that reason, I will pick groups with enough individuals in both the survived and the did-not-survive categories in order to observe the effect of age.
```
df_t = df2[np.logical_not((df['Sex'] == 'female') & np.isin(df2['Pclass'], [1,2]))].copy()
df_t['Survived'] = df_t['Survived'].apply(lambda x: ['Não', 'Sim'][x])
sns.displot(x='Age', hue='Survived', col='Pclass',row='Sex',kind='kde',data=df_t, common_norm=False, fill=True)
```
#### Findings:
* Among the survivors there seems to be a higher concentration of younger people. In this group we can see a peak near 0, indicating a concentration of children.
### 3.3 - How does the number of relatives aboard influence the probability of survival?
#### - Adults
For adults, **Parch** represents the number of children aboard the ship, while **SibSp** represents the number of spouses aboard.
I will create the variables **Child** (indicating whether the individual has at least one child aboard) and **Partner** (indicating whether they have a spouse aboard).
```
df_ad = df2[df2['Age'] > 21].copy()
df_ad['Child'] = df_ad['Parch'] > 0
df_ad['Partner'] = df_ad['SibSp'] > 0
```
I will select only the 3rd class, since it is the most populous.
```
df_ad3 = df_ad[df_ad['Pclass'] == 3].copy()
df_ad3.groupby(['Sex', 'Child', 'Partner'])['Survived'].agg([('Sobreviveram',lambda x: np.round(100*np.mean(x),2)),
('Tamanho',np.size)])
```
* Looking at the proportion of survivors, for both sexes the presence of children or of a partner seems to reduce the probability of survival. It is hard to take this as fact, though, because the sample sizes are very small in some cases.
#### - Children
For children, **Parch** represents the number of parents aboard the ship, while **SibSp** represents the number of siblings aboard.
I will create the variables **Parents** (indicating whether the individual has at least one parent aboard) and **Siblings** (indicating whether they have at least one sibling aboard).
```
df_chil = df2[df2['Age'] < 18].copy()
df_chil['Parents'] = df_chil['Parch'] > 0
df_chil['Siblings'] = df_chil['SibSp'] > 0
```
I will again select only the 3rd class, since it is the most populous.
```
df_chil3 = df_chil[df_chil['Pclass'] == 3].copy()
df_chil3.groupby(['Sex', 'Parents', 'Siblings'])['Survived'].agg([('Sobreviveram',lambda x: np.round(100*np.mean(x),2)),
('Tamanho',np.size)])
```
The samples in each category are too small to draw conclusions. I will take another route and analyze the distribution of the number of siblings among surviving and non-surviving children.
```
df_t = df_chil3[df_chil3['Parents'] & df_chil3['Siblings']]
sns.displot(x='SibSp', hue='Survived',col='Sex',kind='kde',common_norm=False, fill=True, data=df_t)
```
* Looking at the distributions, surviving children tend to have fewer siblings.
## 4 - Analysis summary
* Women have a higher chance of survival than men, probably because of the protocol of prioritizing women and children in a disaster.
* The better the ticket class, the higher the probability of survival, probably because the better classes hold richer passengers.
* The effect of age is not very clear, but younger individuals, especially children, seem to have a higher chance of survival.
* The presence of children or of a spouse aboard seems to reduce the probability of survival, but the sample is too small to state this effect with certainty.
* For children, the presence of siblings seems to reduce the probability of survival, but here too the sample is too small for a definitive conclusion.
```
import sys
sys.path.append("..")
import ScheduleFlow
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# create workload for large jobs
def create_jobs(num_procs, num_jobs, execution, submission):
job_list = []
for i in range(num_jobs):
execution_time = np.random.randint(int(execution/2), execution*2)
request_time = int(execution_time * (100+np.random.randint(-5, 5)) / 100)
processing_units = np.random.randint(1, num_procs + 1)
submission_time = submission
job_list.append(ScheduleFlow.Application(
processing_units,
submission_time,
execution_time,
[request_time]))
return job_list
job_list = create_jobs(10, 3, 100, 0)
job_list += create_jobs(10, 2, 10, 2)
job_list
procs = 10
# run the batch scheduler simulations with different values for the batch size
df = pd.DataFrame(columns=["Batch Size", "Utilization", "Response_time", "Scheduler"])
i = 0
simulator = ScheduleFlow.Simulator(check_correctness=True)
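# For each of 10 randomly generated workloads, sweep the batch size over 1, 10, 20, ..., 100
# and record the system utilization and the average job response time
# (converted from seconds to hours) for the batch scheduler.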
for loop in range(10):
job_list = create_jobs(procs, 100, 100, 0)
for batch_size in range(0,101,10):
batch_size = max(1,batch_size)
sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(procs),
batch_size=batch_size)
results = simulator.run_scenario(
sch,
job_list,
metrics=["system utilization", "job response time"])
df.loc[i] = [batch_size, results['system utilization'],
results['job response time']/3600, "Batch"]
i += 1
df.head(10)
# run the online scheduler simulation
sch = ScheduleFlow.OnlineScheduler(ScheduleFlow.System(procs))
online = simulator.run_scenario(sch, job_list,
metrics=["system utilization", "job response time"])
print(online)
i = len(df)
for batch_size in range(0,101,10):
df.loc[i] = [batch_size, online['system utilization'],
online['job response time']/3600, "Online"]
i += 1
df.tail()
#plot utilization
fig = plt.figure(figsize=(15,10), dpi=80)
sns.set(rc={'axes.facecolor':'#ffffff'}, font_scale=2.1)
ax = sns.lineplot(y="Utilization", x="Batch Size", markers=True,
dashes=False, linewidth=3, hue="Scheduler",
data=df)
plt.xlabel("Batch size")
plt.ylabel("Utilization")
#plot response time
fig = plt.figure(figsize=(15,10), dpi=80)
sns.set(rc={'axes.facecolor':'#ffffff'}, font_scale=2.1)
ax = sns.lineplot(y="Response_time", x="Batch Size", markers=True,
dashes=False, linewidth=3, hue="Scheduler",
data=df)
plt.xlabel("Batch size")
plt.ylabel("Average job response time (hours)")
```
# About
Notes about the dataset:
* This dataset contains information about cryptocurrencies.
* There are four cryptocurrencies given to us: Bitcoin Cash, Bitcoin, Ethereum and Litecoin.
* We have price data for each cryptocurrency at one-minute intervals.
* We need to forecast the value of a cryptocurrency a certain time into the future.
Notes about the columns in the dataset:
* The time column contains Unix timestamps.
* High refers to the highest value of the currency over the time interval.
* Low refers to the lowest value of the currency over the time interval.
* Open refers to the opening value at the start of the time interval.
* Close refers to the closing value at the end of the time interval.
* Volume refers to the amount of the currency traded.
```
pip install tensorboardcolab
```
# Imports
```
import pandas as pd
from sklearn import preprocessing
from collections import deque
import random
import time
import numpy as np
import os
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import tensorflow as tf
from keras import layers
from keras import optimizers
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from tensorboardcolab import TensorBoardColab, TensorBoardColabCallback
tbc=TensorBoardColab()
```
# Pre-processing
```
df = pd.read_csv('BCH-USD.csv', names = ['time', 'low', 'high', 'open', 'close', 'volume'], verbose = True)
df.tail()
```
* We need to join all the datasets using the common `time` column.
```
main_df = pd.DataFrame()
ratios = ["BTC-USD", "LTC-USD", "BCH-USD", "ETH-USD"]
for ratio in ratios:
# print(ratio)
dataset = ratio + '.csv'
df = pd.read_csv(dataset, names = ['time', 'low', 'high', 'open', 'close', 'volume'])
df.rename(columns = {"close": ratio + "_close", "volume" : ratio + "_volume"}, inplace = True)
df.set_index("time", inplace = True)
df = df[[ratio + "_close", ratio + "_volume"]]
print(df.head())
if(len(main_df) == 0):
main_df = df
else:
main_df = main_df.join(df)
for column in main_df.columns:
print(column)
main_df.fillna(method = "ffill", inplace = True) # ffill fills it using the previously known values
main_df.dropna(inplace = True) # If we still cannot fill the values then we need to discard those rows
print(main_df.head())
main_df.isnull().values.any() # This checks if there are any missing values in the main dataframe
```
# Feature Engineering
* We need to create a target output.
* Here the target is whether the price of the chosen cryptocurrency will be higher after the prediction horizon.
* We need these hyper-parameters:
* SEQ_LEN = length of the input sequence fed to the RNN.
* FUTURE_PERIOD_PREDICT = how far into the future we are trying to predict.
* RATIO_TO_PRED = which cryptocurrency we are predicting.
* We can create a new column based on profit or loss.
* If the selling value in the future is greater than the present value, we have a profit and assign the label 1; otherwise 0.
```
SEQ_LEN = 60
FUTURE_PERIOD_PREDICT = 3 # 3 min in future
RATIO_TO_PRED = "LTC-USD"
def classify(current, future):
if(future > current):
return(1)
else:
return(0)
# Creating a new future column. This is created by shifting the close column by 3 minutes upwards.
main_df['future'] = main_df[RATIO_TO_PRED + "_close"].shift(-FUTURE_PERIOD_PREDICT)
main_df['target'] = list(map(classify, main_df[RATIO_TO_PRED + "_close"], main_df['future']))
```
# Normalizing and Creating Validation Data
```
main_df.tail()
```
* We need to set aside the last 5 percent of the samples (ordered by time) as validation data.
```
times = sorted(main_df.index.values)
last5_pct = sorted(main_df.index.values) [-int(0.05 * len(times))]
main_validation_df = main_df[(main_df.index >= last5_pct)]
main_df = main_df[(main_df.index < last5_pct)]
print(main_validation_df.head())
print(main_df.head())
def preprocess_df(df):
# We do not need the future column, it was used only to create targets
SEQ_LEN = 60
df = df.drop('future', 1)
for col in df.columns:
if col != "target": # We do not need to normalize the targets
df[col] = df[col].pct_change()
df.dropna(inplace = True)
df[col] = preprocessing.scale(df[col].values)
df.dropna(inplace = True)
# print(df.head())
sequential_data = []
prev_days = deque(maxlen = SEQ_LEN)
for i in df.values:
prev_days.append([n for n in i[:-1]])
if(len(prev_days) == SEQ_LEN):
sequential_data.append([np.array(prev_days), i[-1]])
random.shuffle(sequential_data)
# print(sequential_data.shape)
# print(sequential_data)
# We need to balance the dataset
buys = []
sells = []
for seq, target in sequential_data:
if(target == 0):
sells.append([seq, target])
elif(target == 1):
buys.append([seq, target])
random.shuffle(buys)
random.shuffle(sells)
# Find the lower value of buys and sells
lower = min(len(buys), len(sells))
# We need to restrict the number of buys
buys = buys[:lower]
sells = sells[:lower]
sequential_data = buys + sells
random.shuffle(sequential_data)
X = []
Y = []
for seq, target in sequential_data:
X.append(seq)
Y.append(target)
return(np.array(X), np.array(Y))
```
# Creating Train and Validation Sets
```
X_train, Y_train = preprocess_df(main_df)
X_val, Y_val = preprocess_df(main_validation_df)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape)
```
# Building the Recurrent Neural Network
```
epochs = 50
batch_size = 256
RATIO_TO_PRED = "LTC-USD"
Name = RATIO_TO_PRED + " " + str(int(time.time()))
model = Sequential()
model.add(layers.CuDNNLSTM(128, input_shape=(X_train.shape[1:]), return_sequences=True))
model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization()) #normalizes activation outputs, same reason you want to normalize your input data.
model.add(layers.CuDNNLSTM(64,return_sequences=True))
model.add(layers.Dropout(0.1))
model.add(layers.BatchNormalization())
model.add(layers.CuDNNLSTM(32,return_sequences=False))
model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(2, activation='softmax'))
optim = optimizers.SGD(lr = 0.02, momentum = 0.9)
model.compile(optimizer = optim, loss = "sparse_categorical_crossentropy", metrics = ['accuracy'])
checkpoint = ModelCheckpoint(filepath = "best_model.hdf5", monitor = 'val_acc', save_best_only=True, save_weights_only=True)
history = model.fit(X_train, Y_train, epochs = epochs, batch_size = batch_size, callbacks = [TensorBoardColabCallback(tbc), checkpoint],
validation_data = (X_val, Y_val))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'g', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'g', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
model.summary()
```
In this notebook we will demonstrate different text classification models trained using the IMDB reviews dataset.
```
#Make the necessary imports
import os
import sys
import numpy as np
import tarfile
import wget
import warnings
warnings.filterwarnings("ignore")
from zipfile import ZipFile
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding, LSTM
from keras.models import Model, Sequential
from keras.initializers import Constant
```
Here we set the paths for all the external datasets and models, such as [GloVe](https://nlp.stanford.edu/projects/glove/) and the [IMDB reviews dataset](http://ai.stanford.edu/~amaas/data/sentiment/).
```
try:
from google.colab import files
!wget -P DATAPATH http://nlp.stanford.edu/data/glove.6B.zip
!unzip DATAPATH/glove.6B.zip -C DATAPATH
!wget -P DATAPATH http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -xvf DATAPATH/aclImdb_v1.tar.gz -C DATAPATH
BASE_DIR = 'DATAPATH'
except ModuleNotFoundError:
if not os.path.exists(os.getcwd()+'\\Data\\glove.6B'):
os.mkdir(os.getcwd()+'\\Data\\glove.6B')
url='http://nlp.stanford.edu/data/glove.6B.zip'
path=os.getcwd()+'\Data'
wget.download(url,path)
temp=path+'\glove.6B.zip'
file = ZipFile(temp)
file.extractall(path+'\glove.6B')
file.close()
if not os.path.exists(os.getcwd()+'\\Data\\aclImdb'):
url='http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
path=os.getcwd()+'\Data'
wget.download(url,path)
temp=path+'\aclImdb_v1.tar.gz'
tar = tarfile.open(temp, "r:gz")
tar.extractall(path)
tar.close()
BASE_DIR = 'Data'
GLOVE_DIR = os.path.join(BASE_DIR, 'glove.6B')
TRAIN_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb\\train')
TEST_DATA_DIR = os.path.join(BASE_DIR, 'aclImdb\\test')
#Within these, I only have a pos/ and a neg/ folder containing text files
MAX_SEQUENCE_LENGTH = 1000
MAX_NUM_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
#started off from: https://github.com/keras-team/keras/blob/master/examples/pretrained_word_embeddings.py
#and from: https://github.com/keras-team/keras/blob/master/examples/imdb_lstm.py
```
### Loading and Preprocessing
```
#Function to load the data from the dataset into the notebook. Will be called twice - for train and test.
def get_data(data_dir):
texts = [] # list of text samples
labels_index = {'pos':1, 'neg':0} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(data_dir)):
path = os.path.join(data_dir, name)
if os.path.isdir(path):
if name=='pos' or name=='neg':
label_id = labels_index[name]
for fname in sorted(os.listdir(path)):
fpath = os.path.join(path, fname)
text = open(fpath,encoding='utf8').read()
texts.append(text)
labels.append(label_id)
return texts, labels
train_texts, train_labels = get_data(TRAIN_DATA_DIR)
test_texts, test_labels = get_data(TEST_DATA_DIR)
labels_index = {'pos':1, 'neg':0}
#Just to see how the data looks like.
#print(train_texts[0])
#print(train_labels[0])
#print(test_texts[24999])
#print(test_labels[24999])
#Vectorize these text samples into a 2D integer tensor using Keras Tokenizer
#Tokenizer is fit on training data only, and that is used to tokenize both train and test data.
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(train_texts)
train_sequences = tokenizer.texts_to_sequences(train_texts) #Converting text to a vector of word indexes
test_sequences = tokenizer.texts_to_sequences(test_texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
#Converting this to sequences to be fed into neural network. Max seq. len is 1000 as set earlier
#initial padding of 0s, until vector is of size MAX_SEQUENCE_LENGTH
trainvalid_data = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)
test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH)
trainvalid_labels = to_categorical(np.asarray(train_labels))
test_labels = to_categorical(np.asarray(test_labels))
# split the training data into a training set and a validation set
indices = np.arange(trainvalid_data.shape[0])
np.random.shuffle(indices)
trainvalid_data = trainvalid_data[indices]
trainvalid_labels = trainvalid_labels[indices]
num_validation_samples = int(VALIDATION_SPLIT * trainvalid_data.shape[0])
x_train = trainvalid_data[:-num_validation_samples]
y_train = trainvalid_labels[:-num_validation_samples]
x_val = trainvalid_data[-num_validation_samples:]
y_val = trainvalid_labels[-num_validation_samples:]
#This is the data we will use for CNN and RNN training
print('Splitting the train data into train and valid is done')
print('Preparing embedding matrix.')
# first, build index mapping words in the embeddings set
# to their embedding vector
embeddings_index = {}
with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'),encoding='utf8') as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
print('Found %s word vectors in Glove embeddings.' % len(embeddings_index))
#print(embeddings_index["google"])
# prepare embedding matrix - rows are the words from word_index, columns are the embeddings of that word from glove.
num_words = min(MAX_NUM_WORDS, len(word_index)) + 1
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NUM_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# load these pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
embeddings_initializer=Constant(embedding_matrix),
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print("Preparing of embedding matrix is done")
```
### 1D CNN Model with pre-trained embedding
```
print('Define a 1D CNN model.')
cnnmodel = Sequential()
cnnmodel.add(embedding_layer)
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(MaxPooling1D(5))
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(MaxPooling1D(5))
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(GlobalMaxPooling1D())
cnnmodel.add(Dense(128, activation='relu'))
cnnmodel.add(Dense(len(labels_index), activation='softmax'))
cnnmodel.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
#Train the model. Tune to validation set.
cnnmodel.fit(x_train, y_train,
batch_size=128,
epochs=1, validation_data=(x_val, y_val))
#Evaluate on test set:
score, acc = cnnmodel.evaluate(test_data, test_labels)
print('Test accuracy with CNN:', acc)
```
### 1D CNN model with training your own embedding
```
print("Defining and training a CNN model, training embedding layer on the fly instead of using pre-trained embeddings")
cnnmodel = Sequential()
cnnmodel.add(Embedding(MAX_NUM_WORDS, 128))
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(MaxPooling1D(5))
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(MaxPooling1D(5))
cnnmodel.add(Conv1D(128, 5, activation='relu'))
cnnmodel.add(GlobalMaxPooling1D())
cnnmodel.add(Dense(128, activation='relu'))
cnnmodel.add(Dense(len(labels_index), activation='softmax'))
cnnmodel.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
#Train the model. Tune to validation set.
cnnmodel.fit(x_train, y_train,
batch_size=128,
epochs=1, validation_data=(x_val, y_val))
#Evaluate on test set:
score, acc = cnnmodel.evaluate(test_data, test_labels)
print('Test accuracy with CNN:', acc)
```
### LSTM Model with training your own embedding
```
print("Defining and training an LSTM model, training embedding layer on the fly")
#model
rnnmodel = Sequential()
rnnmodel.add(Embedding(MAX_NUM_WORDS, 128))
rnnmodel.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
rnnmodel.add(Dense(2, activation='sigmoid'))
rnnmodel.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Training the RNN')
rnnmodel.fit(x_train, y_train,
batch_size=32,
epochs=1,
validation_data=(x_val, y_val))
score, acc = rnnmodel.evaluate(test_data, test_labels,
batch_size=32)
print('Test accuracy with RNN:', acc)
```
### LSTM Model using pre-trained Embedding Layer
```
print("Defining and training an LSTM model, using pre-trained embedding layer")
rnnmodel2 = Sequential()
rnnmodel2.add(embedding_layer)
rnnmodel2.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
rnnmodel2.add(Dense(2, activation='sigmoid'))
rnnmodel2.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Training the RNN')
rnnmodel2.fit(x_train, y_train,
batch_size=32,
epochs=1,
validation_data=(x_val, y_val))
score, acc = rnnmodel2.evaluate(test_data, test_labels,
batch_size=32)
print('Test accuracy with RNN:', acc)
```
```
import pandas as pd
import numpy as np
raw_data = pd.read_csv('data/sms-spam-data.csv', encoding = "ISO-8859-1")
data = raw_data[["v1", "v2"]].rename(columns={"v1": "label", "v2": "text"})
data.head()
data = pd.get_dummies(data, columns=["label"], drop_first=True).rename(columns={"label_spam": "label"})
data.value_counts("label")
def count_words(string):
return len(string.split(" "))
def get_sample_count_ratio(df):
n = df.shape[0]
word_counts = []
for row in df["text"]:
word_counts.append(count_words(row))
avg_word_count = sum(word_counts)/len(word_counts)
return n/avg_word_count
get_sample_count_ratio(data)
```
According to https://developers.google.com/machine-learning/guides/text-classification/step-2-5 this indicates that we should go with an n-gram preprocessing step coupled with a simple MLP model.
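As a rough sketch of that decision rule (the ~1500 cut-off is the figure quoted in the linked guide and is treated as an assumption here):
```
# Heuristic from the linked guide (assumed cut-off of 1500):
# a low samples/words-per-sample ratio favours an n-gram + MLP pipeline,
# a high ratio favours a sequence model.
ratio = get_sample_count_ratio(data)
suggested = "n-gram + MLP" if ratio < 1500 else "sequence model"
print(f"S/W ratio = {ratio:.1f} -> suggested approach: {suggested}")
```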
We proceed to experiment with an n-gram-based model. The pre-processing steps are as follows:
- Tokenize each text sample into 1- and 2-word n-grams, i.e. extract each individual word as well as each pair of consecutive words, and use the combination of both.
- Vectorize the samples using a TF-IDF encoding scheme. Each piece of text is converted into a vector capturing which n-grams are present in it.
- Drop the least informative n-gram tokens by discarding those that occur fewer than two times and using statistical tests to determine feature importance.
```
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
# n-gram sizes to compute
NGRAM_RANGE = (1, 2)
# Limit on the number of features
TOP_K = 10000
# Whether text should be split into word or character n-grams
TOKEN_MODE = 'word'
# Minimum document/corpus frequency below which a token will be discarded.
MIN_DOCUMENT_FREQUENCY = 2
from typing import List
def ngram_vectorize(train_text: List[str], train_labels: np.ndarray, test_text: List[str]):
# Arguments for vectorizor
kwargs = {
'ngram_range': NGRAM_RANGE, # Use 1-grams + 2-grams.
'dtype': 'int32',
'strip_accents': 'unicode',
'decode_error': 'replace',
'analyzer': TOKEN_MODE, # Split text into word tokens.
'min_df': MIN_DOCUMENT_FREQUENCY,
}
vectorizer = TfidfVectorizer(**kwargs)
# Vectorize training text
x_train = vectorizer.fit_transform(train_text)
# Vectorize test text
x_test = vectorizer.transform(test_text)
# Select top k features
selector = SelectKBest(f_classif, k=TOP_K)
selector.fit(x_train, train_labels)
x_train = selector.transform(x_train).astype('float32')
x_test = selector.transform(x_test).astype('float32')
return x_train, x_test
```
Now we need to prepare the data. First we create a train and test split and then wrangle the results into the appropriate format for our `ngram_vectorize` function
```
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(data['text'], data['label'], test_size=0.2, random_state=99)
x_train = list(x_train)
y_train = y_train.to_numpy()
x_test = list(x_test)
y_test = y_test.to_numpy()
x_train, x_test = ngram_vectorize(x_train, y_train, x_test)
import tensorflow as tf
from typing import Tuple
def build_model(layers: int, units: int, dropout_rate: float, input_shape: Tuple):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units, input_shape=input_shape))
model.add(tf.keras.layers.Dropout(rate=dropout_rate))
for _ in range(layers-1):
model.add(tf.keras.layers.Dense(units, activation='relu'))
model.add(tf.keras.layers.Dropout(rate=dropout_rate))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
return model
def train_model(train_data,
train_labels,
test_data,
test_labels,
learning_rate=1e-3,
epochs=100,
batch_size=32,
layers=2,
units=64,
dropout_rate=0.2):
# Create model
model = build_model(layers=layers,units=units, dropout_rate=dropout_rate, input_shape=x_train.shape[1:])
# Compile model
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
loss = 'binary_crossentropy'
model.compile(optimizer=optimizer, loss=loss, metrics=['acc', tf.keras.metrics.FalseNegatives(name="fn")])
# Create early stopping callback
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)]
# Train model
history = model.fit(train_data,
train_labels,
epochs=epochs,
callbacks=callbacks,
validation_data=(test_data, test_labels),
verbose=2,
batch_size=batch_size)
# Print results
history = history.history
print('Validation accuracy: {acc}, loss: {loss}, false negatives: {fn}'.format(
acc=history['val_acc'][-1], loss=history['val_loss'][-1], fn=history['fn'][-1]))
train_model(x_train, y_train, x_test, y_test)
```
This model seems to be doing remarkably well, only misclassifying 2 spam messages
Saving the cleaned data to a csv for easier processing with TFX components
```
data.to_csv('data/clean-spam-data.csv', index=False)
```
# Activity 5.03 - Binary Classification Using a CART Decision Tree (with Answers)
This activity uses a CART decision tree to classify images from the MNIST dataset (digits 0 and 1 only) as either the digit 0 or the digit 1.
## Import the Required Packages
```
import struct
import numpy as np
import pandas as pd
import gzip
import urllib.request
import matplotlib.pyplot as plt
from array import array
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
```
Load the MNIST data into memory
1. Training images
```
with gzip.open('../Datasets/train-images-idx3-ubyte.gz', 'rb') as f:
magic, size, rows, cols = struct.unpack(">IIII", f.read(16))
img = np.array(array("B", f.read())).reshape((size, rows, cols))
```
2. Training labels
```
with gzip.open('../Datasets/train-labels-idx1-ubyte.gz', 'rb') as f:
magic, size = struct.unpack(">II", f.read(8))
labels = np.array(array("B", f.read()))
```
3. Test images
```
with gzip.open('../Datasets/t10k-images-idx3-ubyte.gz', 'rb') as f:
magic, size, rows, cols = struct.unpack(">IIII", f.read(16))
img_test = np.array(array("B", f.read())).reshape((size, rows, cols))
```
4. Test labels
```
with gzip.open('../Datasets/t10k-labels-idx1-ubyte.gz', 'rb') as f:
magic, size = struct.unpack(">II", f.read(8))
labels_test = np.array(array("B", f.read()))
```
Visualise a sample of the data
```
for i in range(10):
plt.subplot(2, 5, i + 1)
plt.imshow(img[i], cmap='gray');
plt.title(f'{labels[i]}');
plt.axis('off')
```
## Construct a CART Decision Tree Classification Model
Construct a CART decision tree classifier to determine whether each sample is the digit 0 or the digit 1. To do this we first need to select only those samples (digits 0 and 1).
```
samples_0_1 = np.where((labels == 0) | (labels == 1))[0]
images_0_1 = img[samples_0_1]
labels_0_1 = labels[samples_0_1]
samples_0_1_test = np.where((labels_test == 0) | (labels_test == 1))
images_0_1_test = img_test[samples_0_1_test].reshape((-1, rows * cols))
labels_0_1_test = labels_test[samples_0_1_test]
```
Visualising the selected information with images of one sample of 0 and one sample of 1.
```
sample_0 = np.where((labels == 0))[0][0]
plt.imshow(img[sample_0], cmap='gray');
```
And here is a sample of the digit 1:
```
sample_1 = np.where((labels == 1))[0][0]
plt.imshow(img[sample_1], cmap='gray');
```
In order to provide the image information to the Decision Tree model we must first flatten the data out so that each image is 1 x 784 pixels in shape.
```
images_0_1 = images_0_1.reshape((-1, rows * cols))
images_0_1.shape
```
Let's construct the model using the sklearn `DecisionTreeClassifier` API and call the `fit` function.
```
model = DecisionTreeClassifier(random_state=123)
model = model.fit(X=images_0_1, y=labels_0_1)
model
```
Determine the training set accuracy
```
model.score(X=images_0_1, y=labels_0_1)
```
Determine the label predictions for each of the training samples. Note that `DecisionTreeClassifier.predict` already returns hard class labels (0 or 1), so the 0.5 threshold below simply mirrors the workflow used for models that output probabilities: values greater than 0.5 classify as 1, values less than or equal to 0.5 classify as 0.
```
y_pred = model.predict(images_0_1) > 0.5
y_pred = y_pred.astype(int)
y_pred
```
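If you did want to work with explicit probabilities, the equivalent form would use `predict_proba`; this is only a hypothetical alternative and is not needed for the accuracy computation below:
```
# Hypothetical alternative: threshold the predicted probability of class 1 at 0.5
proba_pred = (model.predict_proba(images_0_1)[:, 1] > 0.5).astype(int)
```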
Compute the classification accuracy of the predicted training values vs the ground truth
```
np.sum(y_pred == labels_0_1) / len(labels_0_1)
```
Compare the performance against the test set
```
y_pred = model.predict(images_0_1_test) > 0.5
y_pred = y_pred.astype(int)
np.sum(y_pred == labels_0_1_test) / len(labels_0_1_test)
```
# Advanced filtering
In this tutorial we are going to see how to use the ``F`` object to do advanced filtering of hosts. Let's start by initiating nornir and looking at the inventory:
```
from nornir import InitNornir
from nornir.core.filter import F
nr = InitNornir(config_file="advanced_filtering/config.yaml")
%cat advanced_filtering/inventory/hosts.yaml
%cat advanced_filtering/inventory/groups.yaml
```
As you can see, we have built ourselves a collection of animals with different properties. The ``F`` object lets you access the magic methods of each type by prepending two underscores and the name of the magic method. For instance, if you want to check whether a list contains a particular element you can just prepend ``__contains``. Let's use this feature to retrieve all the animals that belong to the group ``bird``:
```
birds = nr.filter(F(groups__contains="bird"))
print(birds.inventory.hosts.keys())
```
We can also invert the ``F`` object by prepending ``~``:
```
not_birds = nr.filter(~F(groups__contains="bird"))
print(not_birds.inventory.hosts.keys())
```
We can also combine ``F`` objects and perform AND and OR operations with the symbols ``&`` and ``|`` (pipe) respectively:
```
domestic_or_bird = nr.filter(F(groups__contains="bird") | F(domestic=True))
print(domestic_or_bird.inventory.hosts.keys())
domestic_mammals = nr.filter(F(groups__contains="mammal") & F(domestic=True))
print(domestic_mammals.inventory.hosts.keys())
```
As expected, you can combine all of the symbols:
```
flying_not_carnivore = nr.filter(F(fly=True) & ~F(diet="carnivore"))
print(flying_not_carnivore.inventory.hosts.keys())
```
You can also access nested data the same way you access magic methods, by appending two underscores and the data you want to access. You can keep building on this as much as needed and even access the magic methods of the nested data. For instance, let's get the animals that have a lifespan greater than or equal to 15:
```
long_lived = nr.filter(F(additional_data__lifespan__ge=15))
print(long_lived.inventory.hosts.keys())
```
There are two extra facilities to help you work with lists: ``any`` and ``all``. They let you pass a list of elements and get the objects that have either any of the members or all of them. For instance:
```
marine_and_invertebrates = nr.filter(F(groups__all=["marine", "invertebrate"]))
print(marine_and_invertebrates.inventory.hosts.keys())
bird_or_invertebrates = nr.filter(F(groups__any=["bird", "invertebrate"]))
print(bird_or_invertebrates.inventory.hosts.keys())
```
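All of these building blocks can be mixed freely. As a closing sketch (reusing the groups and fields from the inventory above, so adapt the names to your own data), here is a filter for flying animals that are either domestic or have a lifespan of at least 15:
```
flying_special = nr.filter(
    F(fly=True) & (F(domestic=True) | F(additional_data__lifespan__ge=15))
)
print(flying_special.inventory.hosts.keys())
```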
|
github_jupyter
|
from nornir import InitNornir
from nornir.core.filter import F
nr = InitNornir(config_file="advanced_filtering/config.yaml")
%cat advanced_filtering/inventory/hosts.yaml
%cat advanced_filtering/inventory/groups.yaml
birds = nr.filter(F(groups__contains="bird"))
print(birds.inventory.hosts.keys())
not_birds = nr.filter(~F(groups__contains="bird"))
print(not_birds.inventory.hosts.keys())
domestic_or_bird = nr.filter(F(groups__contains="bird") | F(domestic=True))
print(domestic_or_bird.inventory.hosts.keys())
domestic_mammals = nr.filter(F(groups__contains="mammal") & F(domestic=True))
print(domestic_mammals.inventory.hosts.keys())
flying_not_carnivore = nr.filter(F(fly=True) & ~F(diet="carnivore"))
print(flying_not_carnivore.inventory.hosts.keys())
long_lived = nr.filter(F(additional_data__lifespan__ge=15))
print(long_lived.inventory.hosts.keys())
marine_and_invertebrates = nr.filter(F(groups__all=["marine", "invertebrate"]))
print(marine_and_invertebrates.inventory.hosts.keys())
bird_or_invertebrates = nr.filter(F(groups__any=["bird", "invertebrate"]))
print(bird_or_invertebrates.inventory.hosts.keys())
| 0.257485 | 0.954393 |
```
import sys
!{sys.executable} -m pip install scikit-plot
import pandas as pd
import nltk
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import pickle
import os
import numpy as np
def preprocess_tweets(data):
snow = nltk.stem.SnowballStemmer('english')
data['description'].dropna(inplace=True)
for index, sentence in enumerate(data['description']):
sentence = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '', str(sentence))
sentence = re.sub(r'[?|!|\'|’|"|#|@|_|:|“|”|-|"|-|-|<|>|{|}.|,|)|(|\|/]', r'', sentence)
sentence = nltk.word_tokenize(sentence)
words = [snow.stem(word) for word in sentence if word not in stopwords.words('english')]
data.loc[index, 'description'] = (" ".join(map(str, words)))
X = data['description']
return data
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
def execute_pipeline(cl_type, x_train, x_test, y_train, y_test):
if cl_type == 'svm':
steps = [('scaler', StandardScaler()), ('SVM', SVC(probability=True))]
hypertuning_parameters = {'SVM__C': [0.1, 1, 9, 10, 100], 'SVM__gamma': [0.1, 1], 'SVM__kernel': ['linear']}
label = 'SVM'
import scikitplot as skplt
elif cl_type == 'rf':
steps = [('scaler', StandardScaler()), ('RF', RandomForestClassifier())]
hypertuning_parameters = {"RF__max_depth": [3, None],
"RF__max_features": [1, 10],
"RF__min_samples_leaf": [1, 3, 10],
"RF__criterion": ["gini", "entropy"]}
label = 'RANDOM FOREST'
elif cl_type == 'dt':
steps = [('scaler', StandardScaler()), ('DT', DecisionTreeClassifier())]
hypertuning_parameters = {"DT__max_depth": [3, None],
"DT__max_features": [1, 10],
"DT__min_samples_leaf": [1, 3, 10],
"DT__criterion": ["gini", "entropy"]}
label = 'DECISION TREE'
elif cl_type == 'lr':
steps = [('scaler', StandardScaler()), ('LR', LogisticRegression())]
label = 'LOGISTIC REGRESSION'
hypertuning_parameters = {}
pipeline = Pipeline(steps)
warnings.simplefilter(action='ignore')
kfold = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
clf_model = GridSearchCV(pipeline, param_grid=hypertuning_parameters, cv=kfold)
clf_model.fit(x_train, y_train)
y_pred = clf_model.predict(x_test)
print('--------------- ', label, '------------------')
print("Accuracy = %3.2f" % (clf_model.score(x_test, y_test)))
print("Precision = %3.2f" % (precision_score(y_test,y_pred)))
print("Recall = %3.2f" % (recall_score(y_test,y_pred)))
print("F1 = %3.2f" % (f1_score(y_test,y_pred)))
from sklearn.metrics import confusion_matrix
conmat = np.array(confusion_matrix(y_test, y_pred, labels=[1, 0]))
confusion = pd.DataFrame(conmat, index=['YES', 'NO'], columns=['predicted_YES', 'predicted_NO'])
print(confusion)
import scikitplot as skplt
predicted_probas = clf_model.predict_proba(x_test)
skplt.metrics.plot_roc(y_test, predicted_probas)
plt.show()
print('\n')
return clf_model
def export(cl, path):
"""
convert the classifier to byte representation and save it to a file
:param path:
:return:
"""
try:
os.remove(path)
except FileNotFoundError:
pass
with open(path, 'wb') as file:
pickle.dump(cl, file)
def predict_label_and_save(cl):
test_file_path = 'DownsampledCoreDataForBotTest.csv'
dataf = pd.read_csv(test_file_path, encoding='utf-8')
df = preprocess_tweets(dataf)
    # per-row binary features: does the screen name / description contain any bot-indicative token?
    df['screen_name_binary'] = df['screen_name'].astype(str).apply(lambda s: any(sym in s for sym in symbols))
    df['description_binary'] = df['description'].astype(str).apply(lambda d: any(sym in d for sym in symbols))
# Separating observations
x = df.drop(['bot', 'screen_name', 'description', 'location', 'verified'], axis=1)
y = df['bot']
print(x.head())
print("\nClassifying user now...")
predicted_df = []
for i in x.itertuples(index=True, name='Pandas'):
data = np.array(i).reshape(1, -1)
input_data = np.delete(data, 0, axis=1)
result = cl.predict(np.nan_to_num(input_data))
if result[0] == 1:
dictn = {'id': i.id, 'bot': 1}
predicted_df.append(dictn)
else:
dictn = {'id': i.id, 'bot': 0}
predicted_df.append(dictn)
new_df = pd.concat([x, pd.DataFrame(predicted_df)], axis=1)
print(new_df)
new_df.to_csv("BotClassificationResults.csv", index=False)
print("\nClassification done and saved in 'BotClassificationResults.csv'!\n")
```
Feature engineering for the dataset
```
symbols = ['Bot', 'bot', 'b0t', 'B0T', 'B0t', 'random', 'http', 'co', 'every', 'twitter', 'pubmed', 'news',
'created', 'like', 'feed', 'tweeting', 'task', 'world', 'x', 'affiliated', 'latest', 'twitterbot',
'project', 'botally', 'generated', 'image', 'reply', 'tinysubversions', 'biorxiv', 'digital', 'rt',
'ckolderup', 'arxiv', 'rss', 'thricedotted', 'collection', 'want', 'backspace', 'maintained',
'things', 'curated', 'see', 'us', 'people', 'every', 'love', 'please']
train_file_path = 'BotTrainingData.csv'
dataf = pd.read_csv(train_file_path, encoding='utf-8')
# Preprocessing
df = preprocess_tweets(dataf)
# Feature engineering
# per-row binary features: does the screen name / description contain any bot-indicative token?
df['screen_name_binary'] = df['screen_name'].astype(str).apply(lambda s: any(sym in s for sym in symbols))
df['description_binary'] = df['description'].astype(str).apply(lambda d: any(sym in d for sym in symbols))
# Separating observations
x = df.drop(['bot', 'screen_name', 'description', 'id_str', 'std_deviation_friends', 'std_deviation_followers'], axis=1)
y = df['bot']
```
Check for missingness in data
```
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
def get_heatmap(df):
plt.figure(figsize=(10,6))
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
plt.tight_layout()
return plt.show()
get_heatmap(df)
```
We can see that the missing observations are few and confined to the 'description' field.
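To back the heatmap up with numbers, a quick per-column count of missing values can be printed as well (a small sketch that simply tabulates what the plot shows):
```
# count missing values per column to confirm they are limited to 'description'
df.isnull().sum()
```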
```
x.head()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=30, stratify=y)
export(execute_pipeline('dt', x_train, x_test, y_train, y_test), 'trained_dt.p')
export(execute_pipeline('rf', x_train, x_test, y_train, y_test), 'trained_rf.p')
export(execute_pipeline('svm', x_train, x_test, y_train, y_test), 'trained_svm.p')
export(execute_pipeline('lr', x_train, x_test, y_train, y_test), 'trained_lr.p')
cl = execute_pipeline('dt', x_train, x_test, y_train, y_test)
predict_label_and_save(cl)
result_file_path = 'BotClassificationResults.csv'
dataf = pd.read_csv(result_file_path, encoding='utf-8')
dataf.head()
dataf['bot'].value_counts()
```
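Because `export` serialises each fitted `GridSearchCV` object with `pickle`, a later session can reload one of the saved files and reuse it directly. A minimal sketch using the `trained_dt.p` path written above:
```
import pickle

# reload a previously exported classifier and reuse it for prediction
with open('trained_dt.p', 'rb') as file:
    reloaded_cl = pickle.load(file)

# the reloaded object exposes the same predict/predict_proba API,
# so it could be passed straight to predict_label_and_save(reloaded_cl)
```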
# Results
* Labeled "62" users from the annotated dataset as 'bots'.
* Next, we proceed to remove these bot accounts from the annotated dataset for further steps.
|
github_jupyter
|
import sys
!{sys.executable} -m pip install scikit-plot
import pandas as pd
import nltk
import re
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import pickle
import os
import numpy as np
def preprocess_tweets(data):
snow = nltk.stem.SnowballStemmer('english')
data['description'].dropna(inplace=True)
for index, sentence in enumerate(data['description']):
sentence = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '', str(sentence))
sentence = re.sub(r'[?|!|\'|’|"|#|@|_|:|“|”|-|"|-|-|<|>|{|}.|,|)|(|\|/]', r'', sentence)
sentence = nltk.word_tokenize(sentence)
words = [snow.stem(word) for word in sentence if word not in stopwords.words('english')]
data.loc[index, 'description'] = (" ".join(map(str, words)))
X = data['description']
return data
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
def execute_pipeline(cl_type, x_train, x_test, y_train, y_test):
if cl_type == 'svm':
steps = [('scaler', StandardScaler()), ('SVM', SVC(probability=True))]
hypertuning_parameters = {'SVM__C': [0.1, 1, 9, 10, 100], 'SVM__gamma': [0.1, 1], 'SVM__kernel': ['linear']}
label = 'SVM'
import scikitplot as skplt
elif cl_type == 'rf':
steps = [('scaler', StandardScaler()), ('RF', RandomForestClassifier())]
hypertuning_parameters = {"RF__max_depth": [3, None],
"RF__max_features": [1, 10],
"RF__min_samples_leaf": [1, 3, 10],
"RF__criterion": ["gini", "entropy"]}
label = 'RANDOM FOREST'
elif cl_type == 'dt':
steps = [('scaler', StandardScaler()), ('DT', DecisionTreeClassifier())]
hypertuning_parameters = {"DT__max_depth": [3, None],
"DT__max_features": [1, 10],
"DT__min_samples_leaf": [1, 3, 10],
"DT__criterion": ["gini", "entropy"]}
label = 'DECISION TREE'
elif cl_type == 'lr':
steps = [('scaler', StandardScaler()), ('LR', LogisticRegression())]
label = 'LOGISTIC REGRESSION'
hypertuning_parameters = {}
pipeline = Pipeline(steps)
warnings.simplefilter(action='ignore')
kfold = StratifiedKFold(n_splits=10, random_state=42, shuffle=True)
clf_model = GridSearchCV(pipeline, param_grid=hypertuning_parameters, cv=kfold)
clf_model.fit(x_train, y_train)
y_pred = clf_model.predict(x_test)
print('--------------- ', label, '------------------')
print("Accuracy = %3.2f" % (clf_model.score(x_test, y_test)))
print("Precision = %3.2f" % (precision_score(y_test,y_pred)))
print("Recall = %3.2f" % (recall_score(y_test,y_pred)))
print("F1 = %3.2f" % (f1_score(y_test,y_pred)))
from sklearn.metrics import confusion_matrix
conmat = np.array(confusion_matrix(y_test, y_pred, labels=[1, 0]))
confusion = pd.DataFrame(conmat, index=['YES', 'NO'], columns=['predicted_YES', 'predicted_NO'])
print(confusion)
import scikitplot as skplt
predicted_probas = clf_model.predict_proba(x_test)
skplt.metrics.plot_roc(y_test, predicted_probas)
plt.show()
print('\n')
return clf_model
def export(cl, path):
"""
convert the classifier to byte representation and save it to a file
:param path:
:return:
"""
try:
os.remove(path)
except FileNotFoundError:
pass
with open(path, 'wb') as file:
pickle.dump(cl, file)
def predict_label_and_save(cl):
test_file_path = 'DownsampledCoreDataForBotTest.csv'
dataf = pd.read_csv(test_file_path, encoding='utf-8')
df = preprocess_tweets(dataf)
df['screen_name_binary'] = any(x in df.screen_name for x in symbols)
df['description_binary'] = any(x in df.description for x in symbols)
# Separating observations
x = df.drop(['bot', 'screen_name', 'description', 'location', 'verified'], axis=1)
y = df['bot']
print(x.head())
print("\nClassifying user now...")
predicted_df = []
for i in x.itertuples(index=True, name='Pandas'):
data = np.array(i).reshape(1, -1)
input_data = np.delete(data, 0, axis=1)
result = cl.predict(np.nan_to_num(input_data))
if result[0] == 1:
dictn = {'id': i.id, 'bot': 1}
predicted_df.append(dictn)
else:
dictn = {'id': i.id, 'bot': 0}
predicted_df.append(dictn)
new_df = pd.concat([x, pd.DataFrame(predicted_df)], axis=1)
print(new_df)
new_df.to_csv("BotClassificationResults.csv", index=False)
print("\nClassification done and saved in 'BotClassificationResults.csv'!\n")
symbols = ['Bot', 'bot', 'b0t', 'B0T', 'B0t', 'random', 'http', 'co', 'every', 'twitter', 'pubmed', 'news',
'created', 'like', 'feed', 'tweeting', 'task', 'world', 'x', 'affiliated', 'latest', 'twitterbot',
'project', 'botally', 'generated', 'image', 'reply', 'tinysubversions', 'biorxiv', 'digital', 'rt',
'ckolderup', 'arxiv', 'rss', 'thricedotted', 'collection', 'want', 'backspace', 'maintained',
'things', 'curated', 'see', 'us', 'people', 'every', 'love', 'please']
train_file_path = 'BotTrainingData.csv'
dataf = pd.read_csv(train_file_path, encoding='utf-8')
# Preprocessing
df = preprocess_tweets(dataf)
# Feature engineering
df['screen_name_binary'] = any(x in df.screen_name for x in symbols)
df['description_binary'] = any(x in df.description for x in symbols)
# Separating observations
x = df.drop(['bot', 'screen_name', 'description', 'id_str', 'std_deviation_friends', 'std_deviation_followers'], axis=1)
y = df['bot']
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
def get_heatmap(df):
plt.figure(figsize=(10,6))
sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
plt.tight_layout()
return plt.show()
get_heatmap(df)
x.head()
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=30, stratify=y)
export(execute_pipeline('dt', x_train, x_test, y_train, y_test), 'trained_dt.p')
export(execute_pipeline('rf', x_train, x_test, y_train, y_test), 'trained_rf.p')
export(execute_pipeline('svm', x_train, x_test, y_train, y_test), 'trained_svm.p')
export(execute_pipeline('lr', x_train, x_test, y_train, y_test), 'trained_lr.p')
cl = execute_pipeline('dt', x_train, x_test, y_train, y_test)
predict_label_and_save(cl)
result_file_path = 'BotClassificationResults.csv'
dataf = pd.read_csv(result_file_path, encoding='utf-8')
dataf.head()
dataf['bot'].value_counts()
| 0.423935 | 0.527317 |
# T-SNE on Amazon Food Reviews Using tfidf w2v Technique
Data Source: https://www.kaggle.com/snap/amazon-fine-food-reviews
The Amazon Fine Food Reviews dataset consists of reviews of fine foods from Amazon.
Number of reviews: 568,454
Number of users: 256,059
Number of products: 74,258
Timespan: Oct 1999 - Oct 2012
Number of Attributes/Columns in data: 10
Attribute Information:
1. index
2. Id
3. ProductId - unique identifier for the product
4. UserId - unique identifier for the user
5. ProfileName
6. HelpfulnessNumerator - number of users who found the review helpful
7. HelpfulnessDenominator - number of users who indicated whether they found the review helpful or not
8. Score - rating between 1 and 5
9. Time - timestamp for the review
10. Summary - brief summary of the review
11. Text - text of the review
12. ProcessedText - Cleaned & Preprocessed Text of the review
**Objective: Given the Amazon Food reviews, convert each review into a vector using the tfidf w2v technique, then plot T-SNE. Separate positive and negative reviews by blue and red colors respectively.**
[Q] How to determine if a review is positive or negative?
[Ans] We could use the Score/Rating. A rating of 4 or 5 could be considered a positive review. A rating of 1 or 2 could be considered negative. A rating of 3 is neutral and ignored. This is an approximate and proxy way of determining the polarity (positivity/negativity) of a review.
Loading the data: the dataset is available in two forms
1) .csv file
2) SQLite Database
In order to load the data, we have used the SQLite dataset as it is easier to query and visualise the data efficiently. Here, as we only want to get the global sentiment of the recommendations (positive or negative), we will purposefully ignore all scores equal to 3. If the score is above 3, then the recommendation will be set to "positive"; otherwise, it will be set to "negative".
```
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plot
import nltk #nltk = natural language tool kit
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
connection = sqlite3.connect('FinalAmazonFoodReviewsDataset.sqlite')
data = pd.read_sql_query("SELECT * FROM Reviews", connection)
data.head(5)
data.shape
data["Score"].value_counts()
allPositiveReviews = pd.read_sql_query("SELECT * FROM Reviews WHERE Score = 'Positive'", connection)
allPositiveReviews.shape
allNegativeReviews = pd.read_sql_query("SELECT * FROM Reviews WHERE Score = 'Negative'", connection)
allNegativeReviews.shape
positiveReviews_500 = allPositiveReviews[3600:4100]
print(positiveReviews_500.shape)
positiveReviews_500.head(5)
negativeReviews_500 = allNegativeReviews[8500:9000]
print(negativeReviews_500.shape)
negativeReviews_500.head(5)
frames_1000 = [positiveReviews_500, negativeReviews_500]
FinalPositiveNegative = pd.concat(frames_1000)
FinalPositiveNegative.shape
FinalSortedPositiveNegative_1000 = FinalPositiveNegative.sort_values('Time', axis=0, ascending=True, inplace=False)
FinalSortedPositiveNegativeScore_1000 = FinalSortedPositiveNegative_1000["Score"]
print(FinalSortedPositiveNegative_1000.shape)
print(FinalSortedPositiveNegativeScore_1000.shape)
FinalSortedPositiveNegative_1000.head(5)
```
## tfidf_w2v
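The weighting implemented in the code below can be summarised as follows (notation introduced here only for readability): for a review $r$ made up of words $w$,

$$\vec{v}_r = \frac{\sum_{w \in r} \mathrm{tfidf}(w, r)\,\cdot\,\overrightarrow{\mathrm{w2v}}(w)}{\sum_{w \in r} \mathrm{tfidf}(w, r)}$$

i.e. each word's Word2Vec vector is weighted by its TF-IDF score in that review, and the weighted sum is normalised by the total of the weights.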
```
tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), stop_words = "english")
final_tf_idf = tf_idf_vect.fit_transform(FinalSortedPositiveNegative_1000['ProcessedText'].values)
print(type(final_tf_idf))
final_tf_idf.get_shape()
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
i = 0
listOfSentences = []
for sentence in FinalSortedPositiveNegative_1000["ProcessedText"].values:
subSentence = []
for word in sentence.split():
subSentence.append(word)
listOfSentences.append(subSentence)
print(FinalSortedPositiveNegative_1000['ProcessedText'].values[0:2])
print("\n")
print(listOfSentences[0:2])
print("\n")
print(type(listOfSentences))
import gensim
w2v_model=gensim.models.Word2Vec(listOfSentences, min_count=5, size=50, workers=4)
# TF-IDF weighted Word2Vec
tfidf_features = tf_idf_vect.get_feature_names()
tfidf_w2v = []
reviews = 0
for sentence in listOfSentences:
sentenceVector = np.zeros(50)
weightSum = 0
for word in sentence:
try:
W2V_Vector = w2v_model.wv[word]
tfidf = final_tf_idf[reviews, tfidf_features.index(word)]
sentenceVector += (W2V_Vector * tfidf)
weightSum += tfidf
except:
pass
    if weightSum != 0:  # guard against division by zero when no word received a tf-idf weight
        sentenceVector /= weightSum
tfidf_w2v.append(sentenceVector)
reviews += 1
from sklearn.preprocessing import StandardScaler
standardized_tfidf_w2v = StandardScaler().fit_transform(tfidf_w2v)
print(standardized_tfidf_w2v.shape)
print(type(standardized_tfidf_w2v))
```
## TSNE OF TFIDF W2V
```
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=500, n_iter=2500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which helps us in plotting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# plotting the result of t-SNE
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=50, n_iter=3500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which helps us in plotting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# plotting the result of t-SNE
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=100, n_iter=3500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which helps us in plotting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# plotting the result of t-SNE
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=200, n_iter=2500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which helps us in plotting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# plotting the result of t-SNE
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
```
|
github_jupyter
|
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plot
import nltk #nltk = natural language tool kit
import seaborn as sns
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from nltk.stem.porter import PorterStemmer
connection = sqlite3.connect('FinalAmazonFoodReviewsDataset.sqlite')
data = pd.read_sql_query("SELECT * FROM Reviews", connection)
data.head(5)
data.shape
data["Score"].value_counts()
allPositiveReviews = pd.read_sql_query("SELECT * FROM Reviews WHERE Score = 'Positive'", connection)
allPositiveReviews.shape
allNegativeReviews = pd.read_sql_query("SELECT * FROM Reviews WHERE Score = 'Negative'", connection)
allNegativeReviews.shape
positiveReviews_500 = allPositiveReviews[3600:4100]
print(positiveReviews_500.shape)
positiveReviews_500.head(5)
negativeReviews_500 = allNegativeReviews[8500:9000]
print(negativeReviews_500.shape)
negativeReviews_500.head(5)
frames_1000 = [positiveReviews_500, negativeReviews_500]
FinalPositiveNegative = pd.concat(frames_1000)
FinalPositiveNegative.shape
FinalSortedPositiveNegative_1000 = FinalPositiveNegative.sort_values('Time', axis=0, ascending=True, inplace=False)
FinalSortedPositiveNegativeScore_1000 = FinalSortedPositiveNegative_1000["Score"]
print(FinalSortedPositiveNegative_1000.shape)
print(FinalSortedPositiveNegativeScore_1000.shape)
FinalSortedPositiveNegative_1000.head(5)
tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), stop_words = "english")
final_tf_idf = tf_idf_vect.fit_transform(FinalSortedPositiveNegative_1000['ProcessedText'].values)
print(type(final_tf_idf))
final_tf_idf.get_shape()
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
import pickle
i = 0
listOfSentences = []
for sentence in FinalSortedPositiveNegative_1000["ProcessedText"].values:
subSentence = []
for word in sentence.split():
subSentence.append(word)
listOfSentences.append(subSentence)
print(FinalSortedPositiveNegative_1000['ProcessedText'].values[0:2])
print("\n")
print(listOfSentences[0:2])
print("\n")
print(type(listOfSentences))
import gensim
w2v_model=gensim.models.Word2Vec(listOfSentences, min_count=5, size=50, workers=4)
# TF-IDF weighted Word2Vec
tfidf_features = tf_idf_vect.get_feature_names()
tfidf_w2v = []
reviews = 0
for sentence in listOfSentences:
sentenceVector = np.zeros(50)
weightSum = 0
for word in sentence:
try:
W2V_Vector = w2v_model.wv[word]
tfidf = final_tf_idf[reviews, tfidf_features.index(word)]
sentenceVector += (W2V_Vector * tfidf)
weightSum += tfidf
except:
pass
sentenceVector /= weightSum
tfidf_w2v.append(sentenceVector)
reviews += 1
from sklearn.preprocessing import StandardScaler
standardized_tfidf_w2v = StandardScaler().fit_transform(tfidf_w2v)
print(standardized_tfidf_w2v.shape)
print(type(standardized_tfidf_w2v))
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=500, n_iter=2500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which help us in ploting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# Ploting the result of tsne
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=50, n_iter=3500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which help us in ploting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# Ploting the result of tsne
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=100, n_iter=3500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which help us in ploting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# Ploting the result of tsne
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
from sklearn.manifold import TSNE
model = TSNE(n_components=2, random_state=0, perplexity=200, n_iter=2500)
tsne_data = model.fit_transform(standardized_tfidf_w2v)
# creating a new data frame which help us in ploting the result data
tsne_data = np.vstack((tsne_data.T, FinalSortedPositiveNegativeScore_1000)).T
tsne_df = pd.DataFrame(data=tsne_data, columns=("Dim_1", "Dim_2", "Score"))
# Ploting the result of tsne
sns.FacetGrid(tsne_df, hue="Score", size=6).map(plot.scatter, 'Dim_1', 'Dim_2').add_legend()
plot.show()
| 0.521471 | 0.893635 |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/JavaScripts/CloudMasking/Landsat8TOAReflectanceQABand.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/CloudMasking/Landsat8TOAReflectanceQABand.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/JavaScripts/CloudMasking/Landsat8TOAReflectanceQABand.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
```
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
```
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
```
## Add Earth Engine Python script
```
# Add Earth Engine dataset
# This example demonstrates the use of the Landsat 8 QA band to mask clouds.
# Function to mask clouds using the quality band of Landsat 8.
def maskL8(image):
qa = image.select('BQA')
    # Check that the cloud bit is off.
    # See https://www.usgs.gov/land-resources/nli/landsat/landsat-collection-1-level-1-quality-assessment-band
mask = qa.bitwiseAnd(1 << 4).eq(0)
return image.updateMask(mask)
# Map the function over one year of Landsat 8 TOA data and take the median.
composite = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterDate('2016-01-01', '2016-12-31') \
.map(maskL8) \
.median()
# Display the results in a cloudy place.
Map.setCenter(114.1689, 22.2986, 12)
Map.addLayer(composite, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3})
```
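The mask above relies on simple bitwise arithmetic: bit 4 of the `BQA` band flags clouds, so `bitwiseAnd(1 << 4)` is non-zero exactly when that bit is set and the pixel gets masked out. A plain-Python illustration of the same test (the pixel value is made up for the example):
```
qa_value = 0b0000000000010000   # hypothetical BQA pixel with the cloud bit (bit 4) set
cloud_bit = 1 << 4
is_cloudy = (qa_value & cloud_bit) != 0
print(is_cloudy)  # True -> this pixel would be masked out by maskL8
```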
## Display Earth Engine data layers
```
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
```
|
github_jupyter
|
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
# Add Earth Engine dataset
# This example demonstrates the use of the Landsat 8 QA band to mask clouds.
# Function to mask clouds using the quality band of Landsat 8.
def maskL8(image):
qa = image.select('BQA')
#/ Check that the cloud bit is off.
# See https:#www.usgs.gov/land-resources/nli/landsat/landsat-collection-1-level-1-quality-assessment-band
mask = qa.bitwiseAnd(1 << 4).eq(0)
return image.updateMask(mask)
# Map the function over one year of Landsat 8 TOA data and take the median.
composite = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterDate('2016-01-01', '2016-12-31') \
.map(maskL8) \
.median()
# Display the results in a cloudy place.
Map.setCenter(114.1689, 22.2986, 12)
Map.addLayer(composite, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3})
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 0.733643 | 0.952131 |
# 2-1 Intro Python Practice
## Sequence: String
<font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
- Work with String Characters
- Slice strings into substrings
- Iterate through String Characters
- Use String ~~Tricks~~ Methods
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 1</B></font>
## Access String Characters
### `working_string[index]`
```
# [ ] access and print the second character from planet_name: "u"
planet_name = "Jupiter"
# [ ] access and print the first character from planet_name: "J"
planet_name = "Jupiter"
# [ ] access and print the first and last characters from planet_name
planet_name = "Jupiter"
# [ ] using a negative index access and print the first character from planet_name: "J"
planet_name = "Jupiter"
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 2</B></font>
## slice
### `working_string[start:stop]`
### `working_string[start:stop:step]`
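A quick illustration of the slice syntax on an unrelated string (the exercises below still need your own code):
```
demo = "characters"
print(demo[2:5])   # 'ara'   -> start at index 2, stop before index 5
print(demo[1::2])  # 'hrces' -> from index 1, take every 2nd character
```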
```
# [ ] print planet_name sliced into the first 3 characters and remaining characters
planet_name = "Neptune"
# [ ] print 1st char and then every 3rd char of wise_words
# use string slice with a step
wise_words = 'Play it who opens'
# [ ] print planet_name in reverse
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 3</B></font>
## iterate a String
### `for letter in sentence:`
```
# [ ] Get user input for 1 fav_food
# [ ] iterate through letters in fav_food
# - print each letter on a new line
# [ ] iterate work_tip string concatenate each letter to variable: new_string
# [ ] concatenate the letter or a "-" instead of a space " "
# tip: concatenate string example: word = word + "a"
work_tip = "Good code is commented code"
# [ ] Print the first 4 letters of name on new line
name = "Hiroto"
# [ ] Print every other letter from 2nd to last letter of name
name = "Hiroto"
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 4</B></font>
## Program: Mystery Name
- get user input for first_name
- create an empty string variable: new_name
- iterate through letters in first_name Backwards
- add each letter to new_name as you iterate
- Replace the letter if "e", "t" or "a" with "?" *(hint: if, elif, elif, else)*
- print new_name
**example: "Alton" = "no?l?"**
```
# [ ] Create Mystery Name
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 4</B></font>
## `len(), .find(), .count()`
```
- len(working_string)
- .find("i")
- .find("i",start)
- .find("i", start, end)
- .count("i")
- .count("i", start)
- .count("i", start, end)
```
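A quick illustration of these methods on a throwaway string, separate from the exercises below:
```
demo = "mississippi"
print(len(demo))           # 11
print(demo.find("ss"))     # 2  -> index where the first "ss" starts
print(demo.find("ss", 3))  # 5  -> search starts at index 3
print(demo.count("i"))     # 4
```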
```
# [ ] find and display the length of the string: topic
topic = "len() returns the length of a string"
# [ ] use len() to find and display the mid_pt (middle) index (+/- 1) of the string: topic
# note: index values are whole numbers
topic = "len() can take a sequence, like a string, as an argument"
# [ ] print index where first instance of the word "code" starts using .find()
work_tip = "Good code is commented code"
# [ ] search for "code" in code_tip using .find()
# [ ] search substring with substring index start= 13,end = last char
# [ ] save result in variable: code_index
# [ ] display index of where "code" is found, or print "not found" if code_index == -1
work_tip = "Good code is commented code"
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 5</B></font>
```
# [ ] find and report (print) number of w's, o's + use of word "code"
work_tip = "Good code is commented code"
# [ ] count times letter "i" appears in code_tip string
# [ ] find and display the index of all the letter i's in code_tip
# Remember: if .find("i") has No Match, -1 is returned
code_tip = "code a conditional decision like you would say it"
print ("code_tip:" , code_tip)
```
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 6</B></font>
## Program: Words after "G"/"g"
Create a program that inputs a phrase (like a famous quotation) and prints all of the words that start with h-z
Sample input:
`enter a 1 sentence quote, non-alpha separate words:` **`Wheresoever you go, go with all your heart`**
Sample output:
```
WHERESOEVER
YOU
WITH
YOUR
HEART
```
- split the words by building a placeholder variable: **`word`**
- loop each character in the input string
- check if character is a letter
- add a letter to **`word`** each loop until a non-alpha char is encountered
- **if** character is alpha
- add character to **`word`**
- non-alpha detected (space, punctuation, digit,...) defines the end of a word and goes to **`else`**
- **`else`**
- check **`if`** word is greater than "g" alphabetically
- print word
- set word = empty string
- or **else**
- set word = empty string and build the next word
Hint: use `.lower()`
```
# [] create words after "G"
# sample quote "Wheresoever you go, go with all your heart" ~ Confucius (551 BC - 479 BC)
def words(quote):
word = ''
for letter in quote:
if letter.isalpha():
word += letter
elif word:
yield word
word = ''
if word:
yield word
while True:
quote = input("Please input a phrase or type exit to leave: ")
    if quote.lower() == 'exit':
input ('Press ENTER to exit')
break
for word in words(quote):
        # compare in lowercase so capitalised words (e.g. "Wheresoever") still print
        if word.lower() >= 'h':
print(word.upper())
```
[Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) © 2017 Microsoft
|
github_jupyter
|
# [ ] access and print the second character from planet_name: "u"
planet_name = "Jupiter"
# [ ] access and print the first character from planet_name: "J"
planet_name = "Jupiter"
# [ ] access and print the first and last characters from planet_name
planet_name = "Jupiter"
# [ ] using a negative index access and print the first character from planet_name: "J"
planet_name = "Jupiter"
# [ ] print planet_name sliced into the first 3 characters and remaining characters
planet_name = "Neptune"
# [ ] print 1st char and then every 3rd char of wise_words
# use string slice with a step
wise_words = 'Play it who opens'
# [ ] print planet_name in reverse
# [ ] Get user input for 1 fav_food
# [ ] iterate through letters in fav_food
# - print each letter on a new line
# [ ] iterate work_tip string concatenate each letter to variable: new_string
# [ ] concatenate the letter or a "-" instead of a space " "
# tip: concatenate string example: word = word + "a"
work_tip = "Good code is commented code"
# [ ] Print the first 4 letters of name on new line
name = "Hiroto"
# [ ] Print every other letter from 2nd to last letter of name
name = "Hiroto"
# [ ] Create Mystery Name
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 5</B></font>
#
<font size="6" color="#B24C00" face="verdana"> <B>Task 6</B></font>
## Program: Words after "G"/"g"
Create a program inputs a phrase (like a famous quotation) and prints all of the words that start with h-z
Sample input:
`enter a 1 sentence quote, non-alpha separate words:` **`Wheresoever you go, go with all your heart`**
Sample output:
- split the words by building a placeholder variable: **`word`**
- loop each character in the input string
- check if character is a letter
- add a letter to **`word`** each loop until a non-alpha char is encountered
- **if** character is alpha
- add character to **`word`**
- non-alpha detected (space, punctuation, digit,...) defines the end of a word and goes to **`else`**
- **`else`**
- check **`if`** word is greater than "g" alphabetically
- print word
- set word = empty string
- or **else**
- set word = empty string and build the next word
Hint: use `.lower()`
| 0.382372 | 0.840783 |
```
import os
import pandas as pd
import numpy as np
import json
from datetime import datetime
import copy
root_data_path = "../data"
df_dict: dict = {}
runtime_targets: dict = {
"LR": 5,
"MPC": 9,
"K-Means": 14,
"GBT": 16
}
paper_names = {
"LogisticRegression": "LR",
"KMeans": "K-Means",
"GradientBoostedTrees": "GBT"
}
job_names = ["LR", "MPC", "K-Means", "GBT"]
# get data from ellis + profiling runs
cols = ["APP_EVENT_ID", "JOB_ID", "DURATION_MS", "APP_ID", "SCALE_OUT"]
for file in os.listdir(root_data_path):
def calculate_ellis_cvc(row: pd.Series):
duration_m = row["DURATION_MS"] / 60000
return 1 if duration_m > runtime_targets.get(row["APP_ID"]) else 0
def calculate_ellis_cvs(row: pd.Series):
return abs((row["DURATION_MS"] / 60000) - runtime_targets.get(row["APP_ID"])) if row["CVC"] == 1 else 0
if ".csv" in file:
raw_df = pd.read_csv(os.path.join(root_data_path, file), delimiter="|", usecols=cols)
raw_df["APP_ID"] = raw_df["APP_ID"].map(lambda n: paper_names.get(n, n))
raw_df = raw_df.drop(columns=['SCALE_OUT'])
df = raw_df.groupby(by=["APP_EVENT_ID"], as_index=False).agg({"DURATION_MS": sum,
"APP_ID": max
})
df["CVC"] = df[['DURATION_MS', 'APP_ID']].apply(calculate_ellis_cvc, axis=1)
df["CVS"] = df[['DURATION_MS', 'APP_ID', 'CVC']].apply(calculate_ellis_cvs, axis=1)
df = df.sort_values(["APP_EVENT_ID"])
job_name = str(df["APP_ID"].values[0])
df["duration"] = df["DURATION_MS"].map(lambda v: v / 60000)
df = df.drop(columns=['DURATION_MS', 'APP_ID'])
df = df.rename(columns={"APP_EVENT_ID": "id"})
df["Method"] = "Ellis"
df.loc[df["id"] < 11, "Method"] = "Profiling Runs"
print(df.shape)
df_dict[job_name] = df
for file in os.listdir(root_data_path):
if "enel_runs.json" == file:
keys = ["application_execution_id", "application_signature",
"end_time", "start_time"]
list_dict = {k:[] for k in keys}
with open(os.path.join(root_data_path, file), "r") as f:
obj_list = json.load(f)
for obj in obj_list:
for k in keys:
list_dict[k].append(obj[k])
def get_duration(row: pd.Series):
date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
start = datetime.strptime(row["start_time"]["$date"], date_format)
end = datetime.strptime(row["end_time"]["$date"], date_format)
duration = (end - start).total_seconds() / 60
return pd.Series([start, end, duration])
def calculate_enel_cvc(row: pd.Series):
return 1 if row["duration"] > runtime_targets.get(row["application_signature"]) else 0
def calculate_enel_cvs(row: pd.Series):
return abs(row["duration"] - runtime_targets.get(row["application_signature"])) if row["CVC"] == 1 else 0
json_df = pd.DataFrame.from_dict(list_dict)
json_df["application_signature"] = json_df["application_signature"].map(lambda n: paper_names.get(n, n))
json_df[['start_time', 'end_time', "duration"]] = json_df[['start_time', 'end_time']].apply(get_duration, axis=1)
json_df = json_df.sort_values(by=['start_time'])
json_df = json_df.drop(columns=['start_time', 'end_time'])
json_df = json_df.rename(columns={"application_execution_id": "id"})
json_df = json_df.groupby(by=["id"], as_index=False).agg({"duration": sum,
"application_signature": max})
json_df["Method"] = "Enel"
for job_name in job_names:
sub_df = copy.deepcopy(json_df.loc[json_df["application_signature"] == job_name, :])
sub_df["id"] = list(range(1, len(sub_df) + 1))
sub_df.loc[sub_df["id"] < 11, "Method"] = "Profiling2 Runs"
sub_df["CVC"] = sub_df[['duration', 'application_signature']].apply(calculate_enel_cvc, axis=1)
sub_df["CVS"] = sub_df[['duration', 'application_signature', 'CVC']].apply(calculate_enel_cvs, axis=1)
merge = pd.concat([df_dict.get(job_name), sub_df], ignore_index=True)
merge = merge.loc[merge["Method"] != "Profiling2 Runs", :]
print(merge.shape)
df_dict[job_name] = merge
from collections import OrderedDict
new_df_dict = OrderedDict()
for k in job_names:
new_df_dict[k] = df_dict[k]
df_dict = new_df_dict
for k, v in df_dict.items():
print(k, len(v))
palette: dict = {
"Profiling Runs": "grey",
"Ellis": "#4878D0",
"Enel": "#EE854A"
}
y_lim_dict: dict = {
"LR": [2,12],
"MPC": [2, 25],
"K-Means": [0, 100],
"GBT": [10, 43]
}
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
with sns.axes_style("whitegrid"):
fig, axes = plt.subplots(4, 1, figsize=(12,12), sharex=True)
for idx, ((job_name, df), ax) in enumerate(zip(list(df_dict.items()), axes.reshape(-1))):
# plot runtime target line
ax.plot(range(len(df)), [runtime_targets.get(job_name)] * len(df),
color="tab:red", label="Target Runtime", linestyle='dashed', linewidth=2)
ax.plot(range(len(df)), [runtime_targets.get(job_name) * 1.5] * len(df),
color="tab:red", linestyle='dashed', linewidth=1)
ax.plot(range(len(df)), [runtime_targets.get(job_name) * 2] * len(df),
color="tab:red", linestyle='dashed', linewidth=0.5)
# plot actual data
ax = sns.lineplot(ax=ax, data=df, x="id", y="duration", hue="Method", palette=palette, linewidth=2)
ax.get_legend().remove()
ax.set_title(job_name, fontsize=16)
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
if job_name == "K-Means":
vals = df.loc[df["Method"] == "Ellis", "duration"].values.tolist()
max_vals = sorted(vals)[-2:]
ax.scatter(np.array([vals.index(e) + 11 for e in max_vals]),
np.array(max_vals), marker="x", color="red", s=100, zorder=10)
if idx == 3:
ax.set_xlabel("# Run", fontsize=16)
else:
ax.set_xlabel(None)
ax.set_ylabel("Runtime [m]", fontsize=16)
# plot background color for profiling runs
ax.axvspan(xmin=0, xmax=10, color=(.9, .9, .9), zorder=-1)
ax.axvspan(xmin=41, xmax=50, color="mistyrose", zorder=-1)
ax.axvspan(xmin=61, xmax=65, color="mistyrose", zorder=-1)
ax.set_xlim(0, 65)
if job_name == "LR":
ax.set_ylim(1,16)
        # allow only integers as tick values on both axes
ax.xaxis.set_major_locator(MaxNLocator(13, integer=True))
ax.yaxis.set_major_locator(MaxNLocator(5, integer=True))
legend = plt.legend(loc='lower center',
ncol=4,
fancybox=True,
fontsize=16,
bbox_to_anchor=(0.5, -0.6))
fig.savefig("complete_experiment_runtime_target_comparison.pdf", bbox_inches='tight')
plt.show()
for job_name, df in list(df_dict.items()):
offsets = [11, 22, 33, 44, 55]
cvc_res = []
cvs_res = []
for off in offsets:
sub_df = copy.deepcopy(df.loc[(df.id >= off) & (df.id < (off+11)), :])
enel_df = sub_df.loc[sub_df.Method == "Enel", :]
cvc_res.append((np.mean(enel_df['CVC'].values),
np.std(enel_df['CVC'].values),
np.median(enel_df['CVC'].values)))
cvs_res.append((np.mean(enel_df['CVS'].values),
np.std(enel_df['CVS'].values),
np.median(enel_df['CVS'].values)))
print(job_name)
print("--> CVC:", " & ".join([f"${tup[0]:.2f}$ & ${tup[2]:.2f}$" for tup in cvc_res]))
print("--> CVS:", "& ".join([f"${tup[0]:.2f}$m & ${tup[2]:.2f}$m" for tup in cvs_res]))
print("\n")
```
|
github_jupyter
|
import os
import pandas as pd
import numpy as np
import json
from datetime import datetime
import copy
root_data_path = "../data"
df_dict: dict = {}
runtime_targets: dict = {
"LR": 5,
"MPC": 9,
"K-Means": 14,
"GBT": 16
}
paper_names = {
"LogisticRegression": "LR",
"KMeans": "K-Means",
"GradientBoostedTrees": "GBT"
}
job_names = ["LR", "MPC", "K-Means", "GBT"]
# get data from ellis + profiling runs
cols = ["APP_EVENT_ID", "JOB_ID", "DURATION_MS", "APP_ID", "SCALE_OUT"]
for file in os.listdir(root_data_path):
def calculate_ellis_cvc(row: pd.Series):
duration_m = row["DURATION_MS"] / 60000
return 1 if duration_m > runtime_targets.get(row["APP_ID"]) else 0
def calculate_ellis_cvs(row: pd.Series):
return abs((row["DURATION_MS"] / 60000) - runtime_targets.get(row["APP_ID"])) if row["CVC"] == 1 else 0
if ".csv" in file:
raw_df = pd.read_csv(os.path.join(root_data_path, file), delimiter="|", usecols=cols)
raw_df["APP_ID"] = raw_df["APP_ID"].map(lambda n: paper_names.get(n, n))
raw_df = raw_df.drop(columns=['SCALE_OUT'])
df = raw_df.groupby(by=["APP_EVENT_ID"], as_index=False).agg({"DURATION_MS": sum,
"APP_ID": max
})
df["CVC"] = df[['DURATION_MS', 'APP_ID']].apply(calculate_ellis_cvc, axis=1)
df["CVS"] = df[['DURATION_MS', 'APP_ID', 'CVC']].apply(calculate_ellis_cvs, axis=1)
df = df.sort_values(["APP_EVENT_ID"])
job_name = str(df["APP_ID"].values[0])
df["duration"] = df["DURATION_MS"].map(lambda v: v / 60000)
df = df.drop(columns=['DURATION_MS', 'APP_ID'])
df = df.rename(columns={"APP_EVENT_ID": "id"})
df["Method"] = "Ellis"
df.loc[df["id"] < 11, "Method"] = "Profiling Runs"
print(df.shape)
df_dict[job_name] = df
for file in os.listdir(root_data_path):
if "enel_runs.json" == file:
keys = ["application_execution_id", "application_signature",
"end_time", "start_time"]
list_dict = {k:[] for k in keys}
with open(os.path.join(root_data_path, file), "r") as f:
obj_list = json.load(f)
for obj in obj_list:
for k in keys:
list_dict[k].append(obj[k])
def get_duration(row: pd.Series):
date_format = "%Y-%m-%dT%H:%M:%S.%fZ"
start = datetime.strptime(row["start_time"]["$date"], date_format)
end = datetime.strptime(row["end_time"]["$date"], date_format)
duration = (end - start).total_seconds() / 60
return pd.Series([start, end, duration])
def calculate_enel_cvc(row: pd.Series):
return 1 if row["duration"] > runtime_targets.get(row["application_signature"]) else 0
def calculate_enel_cvs(row: pd.Series):
return abs(row["duration"] - runtime_targets.get(row["application_signature"])) if row["CVC"] == 1 else 0
json_df = pd.DataFrame.from_dict(list_dict)
json_df["application_signature"] = json_df["application_signature"].map(lambda n: paper_names.get(n, n))
json_df[['start_time', 'end_time', "duration"]] = json_df[['start_time', 'end_time']].apply(get_duration, axis=1)
json_df = json_df.sort_values(by=['start_time'])
json_df = json_df.drop(columns=['start_time', 'end_time'])
json_df = json_df.rename(columns={"application_execution_id": "id"})
json_df = json_df.groupby(by=["id"], as_index=False).agg({"duration": sum,
"application_signature": max})
json_df["Method"] = "Enel"
for job_name in job_names:
sub_df = copy.deepcopy(json_df.loc[json_df["application_signature"] == job_name, :])
sub_df["id"] = list(range(1, len(sub_df) + 1))
sub_df.loc[sub_df["id"] < 11, "Method"] = "Profiling2 Runs"
sub_df["CVC"] = sub_df[['duration', 'application_signature']].apply(calculate_enel_cvc, axis=1)
sub_df["CVS"] = sub_df[['duration', 'application_signature', 'CVC']].apply(calculate_enel_cvs, axis=1)
merge = pd.concat([df_dict.get(job_name), sub_df], ignore_index=True)
merge = merge.loc[merge["Method"] != "Profiling2 Runs", :]
print(merge.shape)
df_dict[job_name] = merge
from collections import OrderedDict
new_df_dict = OrderedDict()
for k in job_names:
new_df_dict[k] = df_dict[k]
df_dict = new_df_dict
for k, v in df_dict.items():
print(k, len(v))
palette: dict = {
"Profiling Runs": "grey",
"Ellis": "#4878D0",
"Enel": "#EE854A"
}
y_lim_dict: dict = {
"LR": [2,12],
"MPC": [2, 25],
"K-Means": [0, 100],
"GBT": [10, 43]
}
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
with sns.axes_style("whitegrid"):
fig, axes = plt.subplots(4, 1, figsize=(12,12), sharex=True)
for idx, ((job_name, df), ax) in enumerate(zip(list(df_dict.items()), axes.reshape(-1))):
# plot runtime target line
ax.plot(range(len(df)), [runtime_targets.get(job_name)] * len(df),
color="tab:red", label="Target Runtime", linestyle='dashed', linewidth=2)
ax.plot(range(len(df)), [runtime_targets.get(job_name) * 1.5] * len(df),
color="tab:red", linestyle='dashed', linewidth=1)
ax.plot(range(len(df)), [runtime_targets.get(job_name) * 2] * len(df),
color="tab:red", linestyle='dashed', linewidth=0.5)
# plot actual data
ax = sns.lineplot(ax=ax, data=df, x="id", y="duration", hue="Method", palette=palette, linewidth=2)
ax.get_legend().remove()
ax.set_title(job_name, fontsize=16)
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=14)
if job_name == "K-Means":
vals = df.loc[df["Method"] == "Ellis", "duration"].values.tolist()
max_vals = sorted(vals)[-2:]
ax.scatter(np.array([vals.index(e) + 11 for e in max_vals]),
np.array(max_vals), marker="x", color="red", s=100, zorder=10)
if idx == 3:
ax.set_xlabel("# Run", fontsize=16)
else:
ax.set_xlabel(None)
ax.set_ylabel("Runtime [m]", fontsize=16)
# plot background color for profiling runs
ax.axvspan(xmin=0, xmax=10, color=(.9, .9, .9), zorder=-1)
ax.axvspan(xmin=41, xmax=50, color="mistyrose", zorder=-1)
ax.axvspan(xmin=61, xmax=65, color="mistyrose", zorder=-1)
ax.set_xlim(0, 65)
if job_name == "LR":
ax.set_ylim(1,16)
# allow only integers as y-axis elements
ax.xaxis.set_major_locator(MaxNLocator(13, integer=True))
ax.yaxis.set_major_locator(MaxNLocator(5, integer=True))
legend = plt.legend(loc='lower center',
ncol=4,
fancybox=True,
fontsize=16,
bbox_to_anchor=(0.5, -0.6))
fig.savefig("complete_experiment_runtime_target_comparison.pdf", bbox_inches='tight')
plt.show()
for job_name, df in list(df_dict.items()):
offsets = [11, 22, 33, 44, 55]
cvc_res = []
cvs_res = []
for off in offsets:
sub_df = copy.deepcopy(df.loc[(df.id >= off) & (df.id < (off+11)), :])
enel_df = sub_df.loc[sub_df.Method == "Enel", :]
cvc_res.append((np.mean(enel_df['CVC'].values),
np.std(enel_df['CVC'].values),
np.median(enel_df['CVC'].values)))
cvs_res.append((np.mean(enel_df['CVS'].values),
np.std(enel_df['CVS'].values),
np.median(enel_df['CVS'].values)))
print(job_name)
print("--> CVC:", " & ".join([f"${tup[0]:.2f}$ & ${tup[2]:.2f}$" for tup in cvc_res]))
print("--> CVS:", "& ".join([f"${tup[0]:.2f}$m & ${tup[2]:.2f}$m" for tup in cvs_res]))
print("\n")
| 0.35354 | 0.294703 |
# Predict New York City Taxi Trip Duration
### Xinzhu Han
https://www.kaggle.com/c/nyc-taxi-trip-duration/overview
The competition dataset is based on the 2016 NYC Yellow Cab trip record data made available in Big Query on Google Cloud Platform. The data was originally published by the NYC Taxi and Limousine Commission (TLC). The data was sampled and cleaned for the purposes of this playground competition. Based on individual trip attributes, participants should predict the duration of each trip in the test set.
The notebook includes the detailed steps of analyzing the dataset, visualization, training a model, and making predictions. Analyzing the dataset prepares the data for the following steps, such as removing duplicates and filling null values, and helps build a good understanding of the whole dataset. Then a few visualizations are drawn to figure out the best model for fitting this dataset. Finally, a model is trained using XGBoost with hyperparameter tuning, and a prediction is made.
### File descriptions
train.csv - the training set (contains 1458644 trip records)
test.csv - the testing set (contains 625134 trip records)
sample_submission.csv - a sample submission file in the correct format
### Data fields
id - a unique identifier for each trip
vendor_id - a code indicating the provider associated with the trip record
pickup_datetime - date and time when the meter was engaged
dropoff_datetime - date and time when the meter was disengaged
passenger_count - the number of passengers in the vehicle (driver entered value)
pickup_longitude - the longitude where the meter was engaged
pickup_latitude - the latitude where the meter was engaged
dropoff_longitude - the longitude where the meter was disengaged
dropoff_latitude - the latitude where the meter was disengaged
store_and_fwd_flag - This flag indicates whether the trip record was held in vehicle memory before sending to the vendor because the vehicle did not have a connection to the server - Y=store and forward; N=not a store and forward trip
trip_duration - duration of the trip in seconds
Disclaimer: The decision was made to not remove dropoff coordinates from the dataset in order to provide an expanded set of variables to use in Kernels.
### 1. Load data
First step is to load the dataset.
```
import numpy as np
import pandas as pd
from matplotlib import pylab as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
train=pd.read_csv('../data/raw/train.csv')
test=pd.read_csv('../data/raw/test.csv')
print("Total number of samples in train file : ", train.shape[0])
print("Total number of samples in test file : ", test.shape[0])
```
### 2. Take a look at the dataset.
```
print("A view of the train dataframe")
print(train.head())
print("\nColumns in train dataset : ", train.columns)
print("\n")
print("Overall description of the train dataset : ")
print(train.info())
```
### 3. Drop the duplicate data and fill null
See if the id column has any overlap between train and test, then delete duplicate data if it exists.
```
train_id = set(train['id'].values)
test_id = set(test['id'].values)
print("Number of unique id in train dataset : ", len(train_id))
print("Number of unique id in test dataset : ", len(test_id))
common_ids = train_id.intersection(test_id)
print("Number of common id in the train and test datasets : ", len(common_ids))
```
So the id column doesn't have duplicates. Then let's look at the details of the target column: trip_duration.
```
target = train['trip_duration']
print("Longest trip duration {} or {} minutes: " .format(np.max(target.values), np.max(target.values)//60))
print("Smallest trip duration {} or {} minutes: ".format(np.min(target.values),np.min(target.values)//60))
print("Average trip duration : {} or {} minutes".format(np.mean(target.values), np.mean(target.values)//60))
```
Check if there are any nulls in the dataset. If so, fill them with the mean.
```
train.isna().sum()
test.isna().sum()
```
So the train and test data have no null values.
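If any column did contain nulls, a minimal sketch of the mean-fill step described above could look like the following (a no-op on this dataset, since no nulls were found):
```
# Fill numeric nulls with the column mean; this does nothing here because no nulls exist.
numeric_cols = train.select_dtypes(include=[np.number]).columns
train[numeric_cols] = train[numeric_cols].fillna(train[numeric_cols].mean())
```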
Limit the area of investigation to within the New York City borders, and convert the date variables (`pickup_datetime` and `dropoff_datetime`) to datetime format.
```
train = train[train['pickup_longitude'] <= -73.75]
train = train[train['pickup_longitude'] >= -74.03]
train = train[train['pickup_latitude'] <= 40.85]
train = train[train['pickup_latitude'] >= 40.63]
train = train[train['dropoff_longitude'] <= -73.75]
train = train[train['dropoff_longitude'] >= -74.03]
train = train[train['dropoff_latitude'] <= 40.85]
train = train[train['dropoff_latitude'] >= 40.63]
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
test['pickup_datetime'] = pd.to_datetime(test.pickup_datetime)
train.loc[:, 'pickup_date'] = train['pickup_datetime'].dt.date
test.loc[:, 'pickup_date'] = test['pickup_datetime'].dt.date
train['dropoff_datetime'] = pd.to_datetime(train.dropoff_datetime) #Not in Test
```
### 4. Visualization
See if some data points show a notable difference.
```
f = plt.figure(figsize=(8,6))
plt.scatter(range(len(target)), np.sort(target.values), alpha=0.5)
plt.xlabel('Index')
plt.ylabel('Trip duration in seconds')
plt.show()
```
The data indeed contain a few points with notable differences (outliers).
A log transform can help us see whether notable patterns emerge in the data.
```
plt.rcParams['figure.figsize'] = [10, 5]
train['log_trip_duration'] = np.log(train['trip_duration'].values + 1)
plt.hist(train['log_trip_duration'].values, bins=100)
plt.xlabel('log(trip_duration)')
plt.ylabel('number of train records')
plt.show()
sns.distplot(train["log_trip_duration"], bins =100)
```
The number of trips over time.
```
plt.plot(train.groupby('pickup_date').count()[['id']], 'o-', label='train')
plt.plot(test.groupby('pickup_date').count()[['id']], 'o-', label='test')
plt.title('Trips over Time.')
plt.legend(loc=0)
plt.ylabel('Trips')
plt.show()
```
A few points stand out in this figure.
How the two vendors differ in their respective mean trip durations:
```
import warnings
warnings.filterwarnings("ignore")
plot_vendor = train.groupby('vendor_id')['trip_duration'].mean()
plt.subplots(1,1,figsize=(5,5))
plt.ylim(ymin=800)
plt.ylim(ymax=840)
sns.barplot(plot_vendor.index,plot_vendor.values)
plt.title('Time per Vendor')
plt.legend(loc=0)
plt.ylabel('Time in Seconds')
```
How the number of trips changes with the passenger count column.
```
pass_count = train['passenger_count']
print("Maximum number of passengers on a trip : ", np.max(pass_count.values))
print("Minimum number of passengers on a trip : ", np.min(pass_count.values))
print("Average number of passengers on a trip : ", np.mean(pass_count.values))
f = plt.figure(figsize=(10,5))
pass_count = train['passenger_count'].value_counts()
sns.barplot(pass_count.index, pass_count.values, alpha=0.7)
plt.xlabel('Number of passengers on a trip', fontsize=14)
plt.ylabel('Count', fontsize=14)
plt.show()
```
How the counts change with the store_and_fwd_flag column.
```
flags = train['store_and_fwd_flag'].value_counts()
f = plt.figure(figsize=(5,5))
sns.barplot(flags.index, flags.values, alpha=0.7)
plt.xlabel('Flags', fontsize=14)
plt.ylabel('Count', fontsize=14)
plt.show()
```
Pickups and dropoffs over the whole month.
```
train1=train
train1['pickup_datetime'] = pd.to_datetime(train['pickup_datetime'])
train1['dropoff_datetime'] = pd.to_datetime(train['dropoff_datetime'])
train1['pickup_day'] = train['pickup_datetime'].dt.day
train1['pickup_month'] = train['pickup_datetime'].dt.month
train1['pickup_weekday'] = train['pickup_datetime'].dt.weekday
train1['pickup_hour'] = train['pickup_datetime'].dt.hour
train1['drop_day'] = train['dropoff_datetime'].dt.day
train1['drop_month'] = train['dropoff_datetime'].dt.month
train1['drop_weekday'] = train['dropoff_datetime'].dt.weekday
train1['drop_hour'] = train['dropoff_datetime'].dt.hour
f = plt.figure(figsize=(15,5))
sns.countplot(x='pickup_day', data=train1)
plt.xlabel('Day of month', fontsize=14)
plt.ylabel('Pickup count', fontsize=14)
plt.show()
```
How many pickups for each month?
```
f = plt.figure(figsize=(8,5))
sns.countplot(x='pickup_month', data=train)
plt.xlabel('Month', fontsize=14)
plt.ylabel('Pickup count', fontsize=14)
plt.show()
```
The trip duration and passenger count.
```
f = plt.figure(figsize=(13,8))
sns.set(style="whitegrid", palette="pastel", color_codes=True)
sns.set_context("poster")
train_data2 = train.copy()
train_data2['trip_duration']= np.log(train['trip_duration'])
sns.violinplot(x="passenger_count", y="trip_duration", hue="vendor_id", data=train_data2, split=True,
inner="quart",palette={1: "g", 2: "r"})
sns.despine(left=True)
```
### 5. Split the dataset into training and testing.
Split the dataset into 70% training and 30% testing using train_test_split.
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
%matplotlib inline
import numpy as np
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from sklearn.datasets import load_boston
from sklearn.model_selection import (cross_val_score, train_test_split,
GridSearchCV, RandomizedSearchCV)
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
X=train[['passenger_count','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']]
y=train[['trip_duration']]
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.30, random_state = 1)
```
### 6. Train a model and predict
#### Hyperparameter tuning
```
import xgboost as xgb
from sklearn.metrics import mean_squared_error
from xgboost.sklearn import XGBRegressor
xgb = XGBRegressor()
parameters = {
'objective': ['reg:linear'],
'learning_rate': [0.3, 0.5, 0.7],
'max_depth': [7, 10, 15],
'subsample': [0.7],
'n_estimators': [100],
'eval_metric': ['rmse']}
xgb_grid = GridSearchCV(xgb, parameters, cv=3, n_jobs=-1, verbose=True)
xgb_grid.fit(X_train, y_train)
best_grid_randomsearch= xgb_grid.best_estimator_
best_grid_randomsearch
```
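The imports above also pull in `RandomizedSearchCV`, `sp_randint`, and `sp_uniform`, which are never used. As a sketch only (the distributions below are illustrative assumptions, not tuned values), the same search could be run with random sampling instead of an exhaustive grid, which usually covers wider ranges with fewer fits:
```
# Illustrative randomized search over the same estimator; the ranges are assumptions.
param_dist = {
    'learning_rate': sp_uniform(0.1, 0.6),  # uniform over [0.1, 0.7]
    'max_depth': sp_randint(5, 16),
    'subsample': sp_uniform(0.6, 0.4),      # uniform over [0.6, 1.0]
    'n_estimators': [100],
}
xgb_random = RandomizedSearchCV(XGBRegressor(objective='reg:linear'), param_dist,
                                n_iter=10, cv=3, n_jobs=-1, verbose=True, random_state=1)
# xgb_random.fit(X_train, y_train)
# xgb_random.best_estimator_
```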
#### Use the best estimator from the grid search as the regressor and apply it in one of the steps of a pipeline.
```
regressor = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, eval_metric='rmse',
gamma=0, importance_type='gain', learning_rate=0.3,
max_delta_step=0, max_depth=7, min_child_weight=1, missing=None,
n_estimators=100, n_jobs=1, nthread=None, objective='reg:linear',
random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
seed=None, silent=None, subsample=0.7, verbosity=1)
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
xgb_model=Pipeline(steps=([('scaler',StandardScaler()),('regressor', regressor)]))
xgb=xgb_model.fit(X_train,y_train)
```
#### Predict the target trip duration by applying X_test to xgb_model. Then calculate the mean absolute error and R2 score of y_test and y_pred.
```
from sklearn.metrics import mean_absolute_error
y_pred = xgb_model.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print("The model performance for testing set from grid search")
print("--------------------------------------")
print('mean absolute error is {}'.format(mae))
print('R2 score is {}'.format(r2))
```
### 7. Reference
- https://www.kaggle.com/pceccon/beginner-s-approach-with-xgboost
- https://www.kaggle.com/frednavruzov/nyc-taxi-eda-feature-engineering
|
github_jupyter
|
import numpy as np
import pandas as pd
from matplotlib import pylab as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
train=pd.read_csv('../data/raw/train.csv')
test=pd.read_csv('../data/raw/test.csv')
print("Total number of samples in train file : ", train.shape[0])
print("Total number of samples in test file : ", test.shape[0])
print("A view of the train dataframe")
print(train.head())
print("\nColumns in train dataset : ", train.columns)
print("\n")
print("Overall description of the train dataset : ")
print(train.info())
train_id = set(train['id'].values)
test_id = set(test['id'].values)
print("Number of unique id in train dataset : ", len(train_id))
print("Number of unique id in test dataset : ", len(test_id))
common_ids = train_id.intersection(test_id)
print("Number of common id in the train and test datasets : ", len(common_ids))
target = train['trip_duration']
print("Longest trip duration {} or {} minutes: " .format(np.max(target.values), np.max(target.values)//60))
print("Smallest trip duration {} or {} minutes: ".format(np.min(target.values),np.min(target.values)//60))
print("Average trip duration : {} or {} minutes".format(np.mean(target.values), np.mean(target.values)//60))
train.isna().sum()
test.isna().sum()
train = train[train['pickup_longitude'] <= -73.75]
train = train[train['pickup_longitude'] >= -74.03]
train = train[train['pickup_latitude'] <= 40.85]
train = train[train['pickup_latitude'] >= 40.63]
train = train[train['dropoff_longitude'] <= -73.75]
train = train[train['dropoff_longitude'] >= -74.03]
train = train[train['dropoff_latitude'] <= 40.85]
train = train[train['dropoff_latitude'] >= 40.63]
train['pickup_datetime'] = pd.to_datetime(train.pickup_datetime)
test['pickup_datetime'] = pd.to_datetime(test.pickup_datetime)
train.loc[:, 'pickup_date'] = train['pickup_datetime'].dt.date
test.loc[:, 'pickup_date'] = test['pickup_datetime'].dt.date
train['dropoff_datetime'] = pd.to_datetime(train.dropoff_datetime) #Not in Test
f = plt.figure(figsize=(8,6))
plt.scatter(range(len(target)), np.sort(target.values), alpha=0.5)
plt.xlabel('Index')
plt.ylabel('Trip duration in seconds')
plt.show()
plt.rcParams['figure.figsize'] = [10, 5]
train['log_trip_duration'] = np.log(train['trip_duration'].values + 1)
plt.hist(train['log_trip_duration'].values, bins=100)
plt.xlabel('log(trip_duration)')
plt.ylabel('number of train records')
plt.show()
sns.distplot(train["log_trip_duration"], bins =100)
plt.plot(train.groupby('pickup_date').count()[['id']], 'o-', label='train')
plt.plot(test.groupby('pickup_date').count()[['id']], 'o-', label='test')
plt.title('Trips over Time.')
plt.legend(loc=0)
plt.ylabel('Trips')
plt.show()
import warnings
warnings.filterwarnings("ignore")
plot_vendor = train.groupby('vendor_id')['trip_duration'].mean()
plt.subplots(1,1,figsize=(5,5))
plt.ylim(ymin=800)
plt.ylim(ymax=840)
sns.barplot(plot_vendor.index,plot_vendor.values)
plt.title('Time per Vendor')
plt.legend(loc=0)
plt.ylabel('Time in Seconds')
pass_count = train['passenger_count']
print("Maximum number of passengers on a trip : ", np.max(pass_count.values))
print("Minimum number of passengers on a trip : ", np.min(pass_count.values))
print("Average number of passengers on a trip : ", np.mean(pass_count.values))
f = plt.figure(figsize=(10,5))
pass_count = train['passenger_count'].value_counts()
sns.barplot(pass_count.index, pass_count.values, alpha=0.7)
plt.xlabel('Number of passengers on a trip', fontsize=14)
plt.ylabel('Count', fontsize=14)
plt.show()
flags = train['store_and_fwd_flag'].value_counts()
f = plt.figure(figsize=(5,5))
sns.barplot(flags.index, flags.values, alpha=0.7)
plt.xlabel('Flags', fontsize=14)
plt.ylabel('Count', fontsize=14)
plt.show()
train1=train
train1['pickup_datetime'] = pd.to_datetime(train['pickup_datetime'])
train1['dropoff_datetime'] = pd.to_datetime(train['dropoff_datetime'])
train1['pickup_day'] = train['pickup_datetime'].dt.day
train1['pickup_month'] = train['pickup_datetime'].dt.month
train1['pickup_weekday'] = train['pickup_datetime'].dt.weekday
train1['pickup_hour'] = train['pickup_datetime'].dt.hour
train1['drop_day'] = train['dropoff_datetime'].dt.day
train1['drop_month'] = train['dropoff_datetime'].dt.month
train1['drop_weekday'] = train['dropoff_datetime'].dt.weekday
train1['drop_hour'] = train['dropoff_datetime'].dt.hour
f = plt.figure(figsize=(15,5))
sns.countplot(x='pickup_day', data=train1)
plt.xlabel('Day of month', fontsize=14)
plt.ylabel('Pickup count', fontsize=14)
plt.show()
f = plt.figure(figsize=(8,5))
sns.countplot(x='pickup_month', data=train)
plt.xlabel('Month', fontsize=14)
plt.ylabel('Pickup count', fontsize=14)
plt.show()
f = plt.figure(figsize=(13,8))
sns.set(style="whitegrid", palette="pastel", color_codes=True)
sns.set_context("poster")
train_data2 = train.copy()
train_data2['trip_duration']= np.log(train['trip_duration'])
sns.violinplot(x="passenger_count", y="trip_duration", hue="vendor_id", data=train_data2, split=True,
inner="quart",palette={1: "g", 2: "r"})
sns.despine(left=True)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
%matplotlib inline
import numpy as np
from scipy.stats import randint as sp_randint
from scipy.stats import uniform as sp_uniform
from sklearn.datasets import load_boston
from sklearn.model_selection import (cross_val_score, train_test_split,
GridSearchCV, RandomizedSearchCV)
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
X=train[['passenger_count','pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude']]
y=train[['trip_duration']]
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.30, random_state = 1)
import xgboost as xgb
from sklearn.metrics import mean_squared_error
from xgboost.sklearn import XGBRegressor
xgb = XGBRegressor()
parameters = {
'objective': ['reg:linear'],
'learning_rate': [0.3, 0.5, 0.7],
'max_depth': [7, 10, 15],
'subsample': [0.7],
'n_estimators': [100],
'eval_metric': ['rmse']}
xgb_grid = GridSearchCV(xgb, parameters, cv=3, n_jobs=-1, verbose=True)
xgb_grid.fit(X_train, y_train)
best_grid_randomsearch= xgb_grid.best_estimator_
best_grid_randomsearch
regressor = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, eval_metric='rmse',
gamma=0, importance_type='gain', learning_rate=0.3,
max_delta_step=0, max_depth=7, min_child_weight=1, missing=None,
n_estimators=100, n_jobs=1, nthread=None, objective='reg:linear',
random_state=0, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
seed=None, silent=None, subsample=0.7, verbosity=1)
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
xgb_model=Pipeline(steps=([('scaler',StandardScaler()),('regressor', regressor)]))
xgb=xgb_model.fit(X_train,y_train)
y_pred = xgb_model.predict(X_test);
mae = mean_absolute_error(y_test, y_pred)
r2=r2_score(y_test, y_pred)
print("The model performance for testing set from grid search")
print("--------------------------------------")
print('mean absoulte error is {}'.format(mae))
print('R2 score is {}'.format(r2))
print('Improvement of {:0.2f}%.'.format( 100 * (r2- r2) / r2))
| 0.410166 | 0.945851 |
<a href="https://colab.research.google.com/github/FCUAIC/Basic-ML/blob/main/mnist_template.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# MNIST Template
Author: 梁定殷
Copyright FCUAI.
Input shape: (1, 28, 28)
## Hyperparameters
```
# Learning rate (LR): a larger LR makes the model update more boldly, a smaller LR more cautiously
LR = 0.01
# How many samples are seen per batch before the model is updated
BATCH_SIZE = 128
# Number of training epochs
EPOCHS = 3
用離線的MNIST = True  # The PyTorch MNIST download is temporarily unavailable
```
## Import the required packages
```
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adadelta
from torch.nn import CrossEntropyLoss
from torchsummary import summary
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
if 用離線的MNIST:
from keras.datasets import mnist
else:
from torchvision.datasets import MNIST
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from tqdm.notebook import trange, tqdm
```
## Model
```
class MNISTModel(nn.Module):
def __init__(self):
super(MNISTModel, self).__init__()
self.cnn = nn.Sequential(
# in: 1x28x28
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3),
nn.ReLU(),
# in: 32x26x26
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
nn.Dropout(),
nn.ReLU(),
# in: 64x24x24
nn.Flatten(),
# out: 36864
)
self.fc = nn.Sequential(
nn.Linear(in_features=36864, out_features=1024),
nn.ReLU(),
nn.Linear(in_features=1024, out_features=10),
)
def forward(self, x):
x = self.cnn(x)
return self.fc(x)
model = MNISTModel().cuda()
summary(model, (1, 28, 28))
```
## Dataset
```
class MNISTDataset(Dataset):
    """
    Our custom-defined Dataset.
    """
    def __init__(self, train=False, transformer=None):
        # Load MNIST from Keras
        (train_feature, train_label), (test_feature, test_label) = mnist.load_data()
        if train:
            # Keep only the training split
            self.data = np.array([list(d) for d in zip(train_feature, train_label)], dtype=object)
            self.length = len(train_feature)
        else:
            # Keep only the test split
            self.data = np.array([list(d) for d in zip(test_feature, test_label)], dtype=object)
            self.length = len(test_feature)
        self.transformer = transformer
    def __len__(self):
        return self.length
    def __getitem__(self, idx):
        # Apply the transform to the data
        if self.transformer:
            img, label = self.data[idx]
            return self.transformer(img), torch.tensor(label, dtype=torch.long)
        return self.data[idx]
```
## Load the dataset
```
preprocessor = transforms.Compose([
    transforms.ToTensor()  # Converting to a Tensor also normalizes pixel values
])
if 用離線的MNIST:
    print('Using the offline dataset.')
    mnist_train = MNISTDataset(train=True, transformer=preprocessor)
    mnist_test = MNISTDataset(train=False, transformer=preprocessor)
else:
    print('Using the PyTorch dataset.')
    mnist_train = MNIST(root='mnist', download=True, transform=preprocessor, train=True)
    mnist_test = MNIST(root='mnist', transform=preprocessor, train=False)
print(f'Training data: {len(mnist_train)} samples\nTest data: {len(mnist_test)} samples')
mnist_train = DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True)  # Shuffle the order of the training data
mnist_test = DataLoader(mnist_test, batch_size=1)
```
## Take a look at our data
```
if 用離線的MNIST:
    preview_train = MNISTDataset(train=True)
else:
    preview_train = MNIST(root='mnist', download=True, transform=None, train=True)
# Exercise 1: complete this functionality
indexs = [-1] * 10
# Find the position of the first occurrence of each digit 0-9 in the dataset
for i, batch in enumerate(preview_train):
    # Unpack one item: img is the image, label is the digit it shows
    img, label = batch
    # Start writing here
# Print the results
for num, idx in enumerate(indexs):
    print(f'The first image of digit {num} appears at position {idx} in the dataset')
print(indexs)
for idx in indexs:
    img, label = preview_train[idx]
    plt.title(f'label: {label}')
    plt.imshow(img, cmap='gray')
    plt.show()
```
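One possible reference solution for Exercise 1 (the template intentionally leaves the loop body blank, so treat this only as a sketch):
```
# Record the index of the first occurrence of each digit 0-9, stopping once all are found.
indexs = [-1] * 10
for i, (img, label) in enumerate(preview_train):
    label = int(label)
    if indexs[label] == -1:
        indexs[label] = i
    if all(idx != -1 for idx in indexs):
        break
```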
## Declare the loss function & optimizer
```
# Loss function used to score the model output
loss_func = CrossEntropyLoss()  # Cross-entropy is the usual loss for class-probability outputs
# The optimizer helps the model learn better; think of it as the learning strategy
optimizer = Adadelta(model.parameters(), lr=LR)
```
## Training
```
# Record the loss of every epoch
losses_log = []
# Start training
for epoch in trange(1, EPOCHS+1, desc='Epoch', unit='epoch'):
    losses = 0
    # Show the model many images
    for batch_x, batch_y in tqdm(mnist_train, desc='Training progress', unit='batch'):
        # Reset the gradients from the previous batch
        optimizer.zero_grad()
        # Move the images and labels to the GPU
        x = batch_x.cuda()
        y = batch_y.cuda()
        # Ask the model what digit this is
        predict = model(x)
        # Score the model's answer
        loss = loss_func(predict, y)
        # Tell the model where it went wrong (backpropagation)
        loss.backward()
        # Accumulate the total loss
        losses += loss.item()
        # Let the model learn from the direction given above
        optimizer.step()
    # Test: show the model images it has never seen to check whether it really learned
    test_losses = 0
    trues = []
    predicts = []
    with torch.no_grad():
        for batch_x, batch_y in tqdm(mnist_test, desc='Test progress', unit='image'):
            x = batch_x.cuda()
            y = batch_y.cuda()
            predict = model(x)
            test_losses += loss_func(predict, y).item()
            # Pick the digit the model considers most likely
            predict = torch.argmax(predict, dim=1, keepdim=True)
            trues += y.tolist()
            predicts += predict.tolist()
    # Record the loss
    losses_log.append(test_losses)
    # Print the summary and statistics for this epoch
    print(f'EPOCH {epoch} | Training loss: {losses/len(mnist_train.dataset)} | Test loss: {test_losses/len(mnist_test.dataset)}\n{classification_report(trues, predicts, labels=[l for l in range(0, 10)], digits=4)}')
```
## Plot the loss over training
```
# X axis is the epoch, Y is the loss for that epoch
plt.plot(list(range(1, EPOCHS+1)), losses_log)
plt.xticks(list(range(1, EPOCHS+1)))
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.xlim(1, EPOCHS)
```
|
github_jupyter
|
# 學習率(Learning Rate), LR越大模型越有自信, LR越小模型越沒自信
LR = 0.01
# 每次學習要看過多少的Batch後才更新模型
BATCH_SIZE = 128
# 學習次數
EPOCHS = 3
用離線的MNIST = True # Pytorch的MNIST暫時不能用
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adadelta
from torch.nn import CrossEntropyLoss
from torchsummary import summary
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
if 用離線的MNIST:
from keras.datasets import mnist
else:
from torchvision.datasets import MNIST
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
from tqdm.notebook import trange, tqdm
class MNISTModel(nn.Module):
def __init__(self):
super(MNISTModel, self).__init__()
self.cnn = nn.Sequential(
# in: 1x28x28
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3),
nn.ReLU(),
# in: 32x26x26
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
nn.Dropout(),
nn.ReLU(),
# in: 64x24x24
nn.Flatten(),
# out: 36864
)
self.fc = nn.Sequential(
nn.Linear(in_features=36864, out_features=1024),
nn.ReLU(),
nn.Linear(in_features=1024, out_features=10),
)
def forward(self, x):
x = self.cnn(x)
return self.fc(x)
model = MNISTModel().cuda()
summary(model, (1, 28, 28))
class MNISTDataset(Dataset):
"""
這是我們定義的Dataset
"""
def __init__(self, train=False, transformer=None):
# 從Keras載入MNIST
(train_feature, train_label), (test_feature, test_label) = mnist.load_data()
if train:
# 我們只要訓練的
self.data = np.array([list(d) for d in zip(train_feature, train_label)], dtype=object)
self.length = len(train_feature)
else:
# 我們只要測試的
self.data = np.array([list(d) for d in zip(test_feature, test_label)], dtype=object)
self.length = len(test_feature)
self.transformer = transformer
def __len__(self):
return self.length
def __getitem__(self, idx):
# 對data做轉換
if self.transformer:
img, label = self.data[idx]
return self.transformer(img), torch.tensor(label, dtype=torch.long)
return self.data[idx]
preprocessor = transforms.Compose([
transforms.ToTensor() #轉成Tensor的時候會做歸一化(Normalize)
])
if 用離線的MNIST:
print('使用離線的Dataset.')
mnist_train = MNISTDataset(train=True, transformer=preprocessor)
mnist_test = MNISTDataset(train=False, transformer=preprocessor)
else:
print('使用Pytorch的Dataset.')
mnist_train = MNIST(root='mnist', download=True, transform=preprocessor, train=True)
mnist_test = MNIST(root='mnist', transform=preprocessor, train=False)
print(f'訓練資料一共有{len(mnist_train)}筆資料\n測試資料一共有{len(mnist_test)}筆資料')
mnist_train = DataLoader(mnist_train, batch_size=BATCH_SIZE, shuffle=True) # 我們想要打散訓練資料的順序
mnist_test = DataLoader(mnist_test, batch_size=1)
if 用離線的MNIST:
preview_train = MNISTDataset(train=True)
else:
preview_train = MNIST(root='mnist', download=True, transform=None, train=True)
# 實作一: 完成這個功能
indexs = [-1] * 10
# 找出第一個0-9在dataset裡的位置
for i, batch in enumerate(preview_train):
# 把一個batch拆開, img是圖片, label是圖片的數字
img, label = batch
# 在這裡開始寫
# 輸出結果
for num, idx in enumerate(indexs):
print(f'第一張數字 {num}的圖片出現在dataset的第 {idx}個位置')
print(indexs)
for idx in indexs:
img, label = preview_train[idx]
plt.title(f'label: {label}')
plt.imshow(img, cmap='gray')
plt.show()
# 計算輸出的損失函數(Loss function)
loss_func = CrossEntropyLoss() # 計算機率的Loss我們會用交叉熵(CrossEntropy)
# 這是優化器(Optimizer), 使得模型能更好的學習, 可以想成是學習的策略
optimizer = Adadelta(model.parameters(), lr=LR)
# 把每個EPOCH的Loss記錄下來
losses_log = []
# 開始訓練
for epoch in trange(1, EPOCHS+1, desc='Epoch', unit='次'):
# 忘記剛才學習的方向
losses = 0
optimizer.zero_grad()
# 給模型看很多的圖
for batch_x, batch_y in tqdm(mnist_train, desc='訓練進度', unit='batch'):
# 把圖跟答案放到GPU
x = batch_x.cuda()
y = batch_y.cuda()
# 問模型這是什麼
predict = model(x)
# 根據模型的回答評分
loss = loss_func(predict, y)
# 告訴模型哪裡錯了
loss.backward()
# 把所有錯的地方跟程度加起來
losses += loss.item()
# 模型根據上面給的方向學習
optimizer.step()
# 測試, 給模型看從沒看過的圖, 看他是真懂還是假懂
test_losses = 0
trues = []
predicts = []
with torch.no_grad():
for batch_x, batch_y in tqdm(mnist_test, desc='測試進度', unit='張'):
x = batch_x.cuda()
y = batch_y.cuda()
predict = model(x)
test_losses += loss_func(predict, y).item()
# 選模型認為最有可能的數字
predict = torch.argmax(predict, dim=1, keepdim=True)
trues += y.tolist()
predicts += predict.tolist()
# 記錄Loss
losses_log.append(test_losses)
# 印出這次Epoch的總結與統計
print(f'EPOCH {epoch} | 訓練資料的Loss: {losses/len(mnist_train.dataset)} | 測試資料的Loss: {test_losses/len(mnist_test.dataset)}\n{classification_report(trues, predicts, labels=[l for l in range(0, 10)], digits=4)}')
# X 軸是Epoch, Y 是對應Epoch 的Loss
plt.plot(list(range(1, EPOCHS+1)), losses_log)
plt.xticks(list(range(1, EPOCHS+1)))
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.xlim(1, EPOCHS)
| 0.683314 | 0.96799 |
```
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten ,Dropout, Conv2D, MaxPooling2D, BatchNormalization
from keras import applications
from keras.models import Sequential, Model, load_model
from tensorflow.keras.models import Sequential
import numpy as np
import os
import random
import cv2
from sklearn.model_selection import train_test_split
categories = {'cane': 'dog', "cavallo": "horse", "elefante": "elephant", "farfalla": "butterfly", "gallina": "chicken", "gatto": "cat", "mucca": "cow", "pecora": "sheep", "scoiattolo": "squirrel","ragno":"spider"}
data=[]
animals=["dog", "horse","elephant", "butterfly", "chicken", "cat", "cow", "sheep", "squirrel","spider"]
img_size=100
def create_data():
for category,translate in categories.items():
path="../input/animals10/raw-img/"+category
target=animals.index(translate)
for img in os.listdir(path):
try:
img_array=cv2.imread(os.path.join(path,img))
new_img_array=cv2.resize(img_array,(img_size,img_size))
data.append([new_img_array,target])
except Exception as e:
pass
create_data()
random.shuffle(data)
x=[]
y=[]
for features,labels in data:
x.append(features)
y.append(labels)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)
x_train=np.array(x_train).reshape(-1,img_size,img_size,3)
x_train=tf.keras.utils.normalize(x_train,axis=1)
y_train=np.array(y_train)
x_test=np.array(x_test).reshape(-1,img_size,img_size,3)
x_test=tf.keras.utils.normalize(x_test,axis=1)
y_test=np.array(y_test)
from keras.models import Sequential, Model, load_model
from keras import applications
from keras import optimizers
from keras.layers import Dropout, Flatten, Dense
from keras.utils import to_categorical
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape= x_train.shape[1:])
add_model = Sequential()
add_model.add(Flatten(input_shape=base_model.output_shape[1:]))
add_model.add(Dense(256, activation='relu'))
add_model.add(Dense(10, activation='softmax'))
model = Model(inputs=base_model.input, outputs=add_model(base_model.output))
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
model.summary()
history = model.fit(x_train,y_train,epochs=50,batch_size=32,validation_data=(x_test, y_test))
print(history.history.keys())
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
```
|
github_jupyter
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten ,Dropout, Conv2D, MaxPooling2D, BatchNormalization
from keras import applications
from keras.models import Sequential, Model, load_model
from tensorflow.keras.models import Sequential
import numpy as np
import os
import random
import cv2
from sklearn.model_selection import train_test_split
categories = {'cane': 'dog', "cavallo": "horse", "elefante": "elephant", "farfalla": "butterfly", "gallina": "chicken", "gatto": "cat", "mucca": "cow", "pecora": "sheep", "scoiattolo": "squirrel","ragno":"spider"}
data=[]
animals=["dog", "horse","elephant", "butterfly", "chicken", "cat", "cow", "sheep", "squirrel","spider"]
img_size=100
def create_data():
for category,translate in categories.items():
path="../input/animals10/raw-img/"+category
target=animals.index(translate)
for img in os.listdir(path):
try:
img_array=cv2.imread(os.path.join(path,img))
new_img_array=cv2.resize(img_array,(img_size,img_size))
data.append([new_img_array,target])
except Exception as e:
pass
create_data()
random.shuffle(data)
x=[]
y=[]
for features,labels in data:
x.append(features)
y.append(labels)
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)
x_train=np.array(x_train).reshape(-1,img_size,img_size,3)
x_train=tf.keras.utils.normalize(x_train,axis=1)
y_train=np.array(y_train)
x_test=np.array(x_test).reshape(-1,img_size,img_size,3)
x_test=tf.keras.utils.normalize(x_test,axis=1)
y_test=np.array(y_test)
from keras.models import Sequential, Model, load_model
from keras import applications
from keras import optimizers
from keras.layers import Dropout, Flatten, Dense
from keras.utils import to_categorical
base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape= x_train.shape[1:])
add_model = Sequential()
add_model.add(Flatten(input_shape=base_model.output_shape[1:]))
add_model.add(Dense(256, activation='relu'))
add_model.add(Dense(10, activation='softmax'))
model = Model(inputs=base_model.input, outputs=add_model(base_model.output))
model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
model.summary()
history = model.fit(x_train,y_train,epochs=50,batch_size=32,validation_data=(x_test, y_test))
print(history.history.keys())
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
| 0.740268 | 0.413359 |
# Debugging XGBoost training jobs in real time with Amazon SageMaker Debugger
This notebook uses the MNIST dataset to demonstrate real-time analysis of XGBoost training jobs while the training jobs are running.
This notebook was created and tested on an ml.m5.4xlarge notebook instance using 100GB instance volume.
## Overview
Amazon SageMaker Debugger allows debugging machine learning training.
SageMaker Debugger helps you to monitor your training in near real time using rules and provides alerts if it detects issues in training.
Using SageMaker Debugger is a two step process: Saving model parameters and analysis.
Let's look at each one of them closely.
### Saving model parameters
In the machine learning process, model parameters are updated with every forward and backward pass, and they describe the state of the training job at any particular instant in the ML lifecycle.
Amazon SageMaker Debugger allows you to capture the model parameters and save them for analysis.
Although XGBoost is not a deep learning algorithm, Amazon SageMaker Debugger is highly customizable and can help you interpret results by saving insightful metrics, such as performance metrics or feature importances, at different frequencies.
Refer to [SageMaker Debugger documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) for details on how to save the metrics you want.
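As a sketch of what such a configuration can look like (the S3 path is a placeholder, and the `feature_importance` collection name is assumed to be supported by the XGBoost Debugger), you might save evaluation metrics frequently and feature importance less often:
```
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig

# Placeholder bucket path; save_interval controls how often each collection is written.
hook_config = DebuggerHookConfig(
    s3_output_path="s3://your-bucket/debug-output",
    collection_configs=[
        CollectionConfig(name="metrics", parameters={"save_interval": "10"}),
        CollectionConfig(name="feature_importance", parameters={"save_interval": "50"}),
    ],
)
```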
### Analysis
There are two ways to get to model parameters and run analysis on them.
One way is to use a concept called ***Rules***. On a very broad level, a rule is Python code used to detect certain conditions during training.
Some of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on.
Amazon SageMaker Debugger comes pre-packaged with certain built-in rules that can be invoked on Amazon SageMaker. You can also write your own rules using the Amazon SageMaker Debugger APIs.
For more details about automatic analysis using rules, see [Configure Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/use-debugger-built-in-rules.html) and [List of Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html).
This notebook also walks you through how to use the SMDebug client library for analysis in real time while training jobs are running. The SMDebug client library enables you to retrieve model parameters and scalars saved during the training job with a few lines of code.
Through the model parameter analysis, you can drill down into training issues you're running into. You save raw model parameter data in order to understand your model better, and figure out the root cause of training problems.

## Import SageMaker Python SDK and the SMDebug client library
<font color='red'>**Important**</font>: To use the new Debugger features, you need to upgrade the SageMaker Python SDK and the SMDebug library, which we do in the following cell.
```
import sys
!{sys.executable} -m pip install -U sagemaker smdebug
import boto3
import sagemaker
```
Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version `0.90-2` or later. The following cell retrieves the SageMaker XGBoost 0.90-2 container.
```
from sagemaker import image_uris
# Below changes the region to be one where this notebook is running
region = boto3.Session().region_name
container = sagemaker.image_uris.retrieve("xgboost", region, "0.90-2")
```
## Training XGBoost models in Amazon SageMaker with Amazon SageMaker Debugger
In this section you learn to train an XGBoost model with Amazon SageMaker Debugger enabled and monitor the training jobs.
This is done using the SageMaker [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html#sagemaker.estimator.Estimator).
While training job is running, use the SageMaker Debugger API to access saved model parameters in real time and visualize them.
You can also download a fresh set of model parameters every time you query for using the SMDebug library.
This notebook is adapted from [XGBoost for Classification](https://github.com/aws/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_mnist/xgboost_mnist.ipynb).
### Data preparation
Use the [MNIST data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html) stored in [LIBSVM](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) format.
```
from data_utils import load_mnist, upload_to_s3
bucket = sagemaker.Session().default_bucket()
prefix = "DEMO-smdebug-xgboost-mnist"
%%time
train_file, validation_file = load_mnist()
upload_to_s3(train_file, bucket, f"{prefix}/train/mnist.train.libsvm")
upload_to_s3(validation_file, bucket, f"{prefix}/validation/mnist.validation.libsvm")
```
### Enabling Amazon SageMaker Debugger in the estimator object
Enabling Amazon SageMaker Debugger in a training job can be accomplished by adding its configuration into an Estimator object constructor:
```
from sagemaker.debugger import DebuggerHookConfig
estimator = Estimator(
...,
debugger_hook_config = DebuggerHookConfig(
s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Optional
collection_configs=[
CollectionConfig(
name="metrics",
parameters={
"save_interval": "10"
}
)
]
)
)
```
Here, the `DebuggerHookConfig` object configures which data `Estimator` should save for the real-time visualization. Provide two parameters:
- `s3_output_path`: Points to an S3 bucket where you intend to store model parameters. The amount of data saved depends on multiple factors; the major ones are the training job, the dataset, the model, and the frequency of saving model parameters. This S3 bucket should be in your AWS account so that you have full control over the stored data. **Note**: The S3 bucket should be created in the same Region where your training job is running, otherwise you might run into problems with cross-Region access.
- `collection_configs`: Enumerates named collections of model parameters to save. Collections are a convenient way to organize relevant model parameters under the same umbrella so they are easy to navigate during analysis. In this particular example, you are interested in a single collection named `metrics`, and you configure Amazon SageMaker Debugger to save metrics every 10 iterations. For all parameters supported by Collections and DebuggerConfig, see [Collection documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md).
### Using Amazon SageMaker Debugger with XGBoost Classification
Import the libraries for the demo of Amazon SageMaker Debugger.
```
from sagemaker import get_execution_role
role = get_execution_role()
base_job_name = "demo-smdebug-xgboost-classification"
bucket_path = "s3://{}".format(bucket)
num_round = 25
save_interval = 3
hyperparameters = {
"max_depth": "5",
"eta": "0.1",
"gamma": "4",
"min_child_weight": "6",
"silent": "0",
"objective": "multi:softmax",
"num_class": "10", # num_class is required for 'multi:*' objectives
"num_round": num_round,
}
from sagemaker.estimator import Estimator
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
xgboost_algorithm_mode_estimator = Estimator(
role=role,
base_job_name=base_job_name,
instance_count=1,
instance_type="ml.m5.xlarge",
image_uri=container,
hyperparameters=hyperparameters,
max_run=1800,
debugger_hook_config=DebuggerHookConfig(
s3_output_path=bucket_path, # Required
collection_configs=[
CollectionConfig(name="metrics", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="predictions", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="labels", parameters={"save_interval": str(save_interval)}),
],
),
)
```
With the next step you are going to actually start a training job using the Estimator object you created above. The job is started in an asynchronous, non-blocking way, which means that control is passed back to the notebook and further commands can be run while the training job is progressing.
```
from sagemaker.session import TrainingInput
train_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "train"), content_type="libsvm"
)
validation_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "validation"), content_type="libsvm"
)
# This is a fire and forget event. By setting wait=False, you just submit the job to run in the background.
# Amazon SageMaker will start one training job and release control to next cells in the notebook.
# Follow this notebook to see status of the training job.
xgboost_algorithm_mode_estimator.fit(
{"train": train_s3_input, "validation": validation_s3_input}, wait=False
)
```
### Result
As a result of the above command, Amazon SageMaker starts one training job for you and it produces model parameters to be analyzed.
This job runs in the background, so you do not have to wait for it to complete in order to continue with the rest of the notebook.
Because of the asynchronous nature of the training job, you need to monitor its status so that you don't start to request debugging data too early.
## Analysis and Visualization
### Checking on the training job status
Check the status of the training job by running the following code.
It checks on the status of an Amazon SageMaker training job every 15 seconds.
Once a training job has started its training cycle, it proceeds to the next cells in the notebook.
That means the training job has started to tune the model and, in parallel, emit model parameters.
```
import time
from time import gmtime, strftime
# Below command will give the status of training job
job_name = xgboost_algorithm_mode_estimator.latest_training_job.name
client = xgboost_algorithm_mode_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
print("Training job name: " + job_name)
if description["TrainingJobStatus"] != "Completed":
while description["SecondaryStatus"] not in ["Training", "Completed"]:
description = client.describe_training_job(TrainingJobName=job_name)
primary_status = description["TrainingJobStatus"]
secondary_status = description["SecondaryStatus"]
print("{}: {}, {}".format(strftime("%X", gmtime()), primary_status, secondary_status))
time.sleep(15)
```
### Retrieving and analyzing model parameters
Before getting to analysis, here are some notes on concepts being used in Amazon SageMaker Debugger that help with analysis.
- ***Trial*** - The centerpiece of the SageMaker Debugger API when it comes to accessing model parameters. It is a top-level abstraction that represents a single run of a training job. All model parameters emitted by a training job are associated with its *trial*.
- ***Tensor*** - Object that represents model parameters, such as weights, gradients, accuracy, and loss, that are saved during training job.
For more details on aforementioned concepts as well as on SageMaker Debugger API and examples, see [SageMaker Debugger Analysis API](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md) documentation.
In the following code cell, use a ***Trial*** to access model parameters. You can do that by inspecting the currently running training job and extracting the necessary parameters from its debug configuration to tell SageMaker Debugger where the data you are looking for is located. Keep in mind the following (a small access sketch follows this list):
- Model parameters are being stored in your own S3 bucket to which you can navigate and manually inspect its content if desired.
- You might notice a slight delay before the trial object is created. This is normal, as SageMaker Debugger monitors the corresponding bucket and waits for model parameter data to appear. The delay is caused by the less-than-instantaneous upload of model parameters from the training container to your S3 bucket.
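As a small sketch of the access pattern (assuming `s3_output_path` points at the debug output of this job; the metric name shown is an assumption and depends on what the training job actually emits), a trial can list what was saved and read one series of values:
```
from smdebug.trials import create_trial

trial = create_trial(s3_output_path)
# List the tensors saved in the "metrics" collection, e.g. train/validation error.
print(trial.tensor_names(collection="metrics"))
# Read one saved series across all available steps.
for step in trial.steps():
    print(step, trial.tensor("validation-merror").value(step))
```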
```
from smdebug.trials import create_trial
description = client.describe_training_job(TrainingJobName=job_name)
s3_output_path = xgboost_algorithm_mode_estimator.latest_job_debugger_artifacts_path()
# This is where we create a Trial object that allows access to saved model parameters.
trial = create_trial(s3_output_path)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from IPython.display import display, clear_output
def plot_confusion_for_one_step(trial, step, ax=None):
if ax is None:
fig, ax = plt.subplots()
cm = confusion_matrix(
trial.tensor("labels").value(step), trial.tensor("predictions").value(step)
)
normalized_cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.heatmap(normalized_cm, cmap="bone", ax=ax, cbar=False, annot=cm, fmt="")
print(f"iteration: {step}")
def plot_and_update_confusion_for_all_steps(trial):
fig, ax = plt.subplots()
rendered_steps = []
# trial.loaded_all_steps is a way to keep monitoring for a state of a training job
# as seen by Amazon SageMaker Debugger.
# When training job is completed Trial becomes aware of it.
while not rendered_steps or not trial.loaded_all_steps:
steps = trial.steps()
# quick way to get diff between two lists
steps_to_render = list(set(steps).symmetric_difference(set(rendered_steps)))
# plot only from newer chunk
for step in steps_to_render:
clear_output(wait=True)
plot_confusion_for_one_step(trial, step, ax=ax)
display(fig)
plt.pause(5)
ax.clear()
rendered_steps.extend(steps_to_render)
fig.clear()
plt.close()
```
### Visualizing confusion matrix of a running training job
Finally, wait until Amazon SageMaker Debugger has downloaded an initial collection of model parameters to look at. Once that collection is ready, new model parameters are fetched every five seconds and the confusion matrix plot is refreshed accordingly.
```
plot_and_update_confusion_for_all_steps(trial)
```
|
github_jupyter
|
import sys
!{sys.executable} -m pip install -U sagemaker smdebug
import boto3
import sagemaker
from sagemaker import image_uris
# Below changes the region to be one where this notebook is running
region = boto3.Session().region_name
container = sagemaker.image_uris.retrieve("xgboost", region, "0.90-2")
from data_utils import load_mnist, upload_to_s3
bucket = sagemaker.Session().default_bucket()
prefix = "DEMO-smdebug-xgboost-mnist"
%%time
train_file, validation_file = load_mnist()
upload_to_s3(train_file, bucket, f"{prefix}/train/mnist.train.libsvm")
upload_to_s3(validation_file, bucket, f"{prefix}/validation/mnist.validation.libsvm")
from sagemaker.debugger import DebuggerHookConfig
estimator = Estimator(
...,
debugger_hook_config = DebuggerHookConfig(
s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Optional
collection_configs=[
CollectionConfig(
name="metrics",
parameters={
"save_interval": "10"
}
)
]
)
)
from sagemaker import get_execution_role
role = get_execution_role()
base_job_name = "demo-smdebug-xgboost-classification"
bucket_path = "s3://{}".format(bucket)
num_round = 25
save_interval = 3
hyperparameters = {
"max_depth": "5",
"eta": "0.1",
"gamma": "4",
"min_child_weight": "6",
"silent": "0",
"objective": "multi:softmax",
"num_class": "10", # num_class is required for 'multi:*' objectives
"num_round": num_round,
}
from sagemaker.estimator import Estimator
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
xgboost_algorithm_mode_estimator = Estimator(
role=role,
base_job_name=base_job_name,
instance_count=1,
instance_type="ml.m5.xlarge",
image_uri=container,
hyperparameters=hyperparameters,
max_run=1800,
debugger_hook_config=DebuggerHookConfig(
s3_output_path=bucket_path, # Required
collection_configs=[
CollectionConfig(name="metrics", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="predictions", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="labels", parameters={"save_interval": str(save_interval)}),
],
),
)
from sagemaker.session import TrainingInput
train_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "train"), content_type="libsvm"
)
validation_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "validation"), content_type="libsvm"
)
# This is a fire and forget event. By setting wait=False, you just submit the job to run in the background.
# Amazon SageMaker will start one training job and release control to next cells in the notebook.
# Follow this notebook to see status of the training job.
xgboost_algorithm_mode_estimator.fit(
{"train": train_s3_input, "validation": validation_s3_input}, wait=False
)
import time
from time import gmtime, strftime
# Below command will give the status of training job
job_name = xgboost_algorithm_mode_estimator.latest_training_job.name
client = xgboost_algorithm_mode_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
print("Training job name: " + job_name)
if description["TrainingJobStatus"] != "Completed":
while description["SecondaryStatus"] not in ["Training", "Completed"]:
description = client.describe_training_job(TrainingJobName=job_name)
primary_status = description["TrainingJobStatus"]
secondary_status = description["SecondaryStatus"]
print("{}: {}, {}".format(strftime("%X", gmtime()), primary_status, secondary_status))
time.sleep(15)
from smdebug.trials import create_trial
description = client.describe_training_job(TrainingJobName=job_name)
s3_output_path = xgboost_algorithm_mode_estimator.latest_job_debugger_artifacts_path()
# This is where we create a Trial object that allows access to saved model parameters.
trial = create_trial(s3_output_path)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from IPython.display import display, clear_output
def plot_confusion_for_one_step(trial, step, ax=None):
if ax is None:
fig, ax = plt.subplots()
cm = confusion_matrix(
trial.tensor("labels").value(step), trial.tensor("predictions").value(step)
)
normalized_cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.heatmap(normalized_cm, cmap="bone", ax=ax, cbar=False, annot=cm, fmt="")
print(f"iteration: {step}")
def plot_and_update_confusion_for_all_steps(trial):
fig, ax = plt.subplots()
rendered_steps = []
# trial.loaded_all_steps is a way to keep monitoring for a state of a training job
# as seen by Amazon SageMaker Debugger.
# When training job is completed Trial becomes aware of it.
while not rendered_steps or not trial.loaded_all_steps:
steps = trial.steps()
# quick way to get diff between two lists
steps_to_render = list(set(steps).symmetric_difference(set(rendered_steps)))
# plot only from newer chunk
for step in steps_to_render:
clear_output(wait=True)
plot_confusion_for_one_step(trial, step, ax=ax)
display(fig)
plt.pause(5)
ax.clear()
rendered_steps.extend(steps_to_render)
fig.clear()
plt.close()
plot_and_update_confusion_for_all_steps(trial)
| 0.427277 | 0.957477 |
# Cleaning & EDA
- Train Test Split data before moving on to modeling
# Principals
- Contains the principal cast/crew for titles:
1. tconst (string) - alphanumeric unique identifier of the title
2. ordering (integer) – a number to uniquely identify rows for a given titleId
3. nconst (string) - alphanumeric unique identifier of the name/person
4. category (string) - the category of job that person was in
5. job (string) - the specific job title if applicable, else '\N'
6. characters (string) - the name of the character played if applicable, else '\N'
```
# importing the modules
import csv
import pandas as pd
import numpy as np
principle_df = pd.read_csv('../data/raw/principals.tsv', sep='\t')
principle_df.head()
tsv_file = open("../data/raw/title.principals.tsv")
read_tsv = csv.reader(tsv_file, delimiter="\t")
```
## Metadata
### **Supporting Actor(s)**
```
supporting = cast_df[cast_df['order'] == 1]
supporting = supporting[['movie_id', 'name']]
supporting = supporting.rename(columns={'movie_id':'id'})
supporting = supporting.rename(columns={'name':'supporting'})
supporting.head(5)
imdb_metadata = pd.merge(imdb_metadata, supporting, on = 'id', how = 'left')
imdb_metadata.head()
```
## **Crew**
Create a new dataframe with the selected features.
```
crew_df.head()
crew_df = crew_df[['name', 'job', 'department', 'gender', 'movie_id']]
crew_df.head()
```
### **Director**
```
director = crew_df[crew_df['job'] == 'Director']
director = director[['movie_id', 'name']]
director = director.rename(columns={'movie_id':'id', 'name':'director'})
director.head(5)
dataset = pd.merge(imdb_metadata, director, on = 'id', how = 'left')
print("Number of rows before dropping those with null values:",len(dataset))
#dataset.dropna(inplace = True)
print("Number of rows after dropping those with null values:",len(dataset))
```
### **Executive Producer**
```
Executive_Producer = crew_df[crew_df['job'] == 'Executive Producer']
Executive_Producer = Executive_Producer[['movie_id', 'name']]
Executive_Producer = Executive_Producer.rename(columns={'movie_id':'id', 'name':'Executive Producer'})
Executive_Producer.head(5)
```
## Repeat these steps for any additional jobs, departments, or individuals you would like to examine (a reusable helper is sketched after the list below)...
list:
- Producer
- Director of Photography
- Editor
- Casting
- Screenplay
- Production Design
- Original Music Composer
- Music
- Music Supervisor
- Costume Designer
- Gaffer
- etc....
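A reusable sketch of the pattern above (assuming `crew_df` keeps the `movie_id`, `name`, and `job` columns selected earlier; the helper and column names are illustrative, not part of the original notebook):
```
# Generic helper: pull the first credited person for a given job and merge it into the metadata.
def add_crew_feature(metadata, crew, job, column_name=None):
    column_name = column_name or job.lower().replace(' ', '_')
    people = crew.loc[crew['job'] == job, ['movie_id', 'name']]
    people = people.drop_duplicates(subset='movie_id')  # keep one credit per film
    people = people.rename(columns={'movie_id': 'id', 'name': column_name})
    return metadata.merge(people, on='id', how='left')

# Example usage (illustrative):
# imdb_metadata = add_crew_feature(imdb_metadata, crew_df, 'Producer')
# imdb_metadata = add_crew_feature(imdb_metadata, crew_df, 'Original Music Composer')
```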
## Seaborn stacked barplot
```
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style="whitegrid")
# Initialize the matplotlib figure
f, ax = plt.subplots(figsize=(12, 8))
# Load the budget and revenue data
profit = high_gp = imdb_final[['title', 'revenue', 'budget']].sort_values(by = "revenue", ascending = False).head(20)
# Plot the total revenue
sns.set_color_codes("pastel")
sns.barplot(x="revenue", y="title", data=profit,
label="Total Revenue", color="orange")
# Plot the budget for each film
sns.set_color_codes("muted")
sns.barplot(x="budget", y="title", data=profit,
label="Budget", color="burlywood")
# Add a legend and informative axis label
ax.legend(ncol=2, loc="lower right", frameon=True)
ax.set(ylabel="Movie Titles",
xlabel="Total (USD Billions)",
title='Budget v Revenue')
sns.despine(left=True, bottom=True)
```
## Plotly Stacked bar chart - Losses
```
# Load gross profit data
import plotly.graph_objects as go  # needed below for go.Figure and go.Bar
profit = imdb_final[['title', 'budget', 'revenue', 'gross_profit']].sort_values(by = 'gross_profit', ascending = True).head(20)
title = profit['title']
investment = profit['budget']
revenue = profit['revenue']
profit = profit['gross_profit']
fig = go.Figure()
fig.add_trace(go.Bar(
y = title,
    x = investment,
name = 'Investment',
orientation='h',
marker=dict(
color='rgb(229, 196, 148)',
line=dict(color='rgb(229, 196, 148)', width=1)
)
))
fig.add_trace(go.Bar(
y = title,
x = profit,
name = 'Revenue',
orientation='h',
marker=dict(
color='rgb(218, 165, 27)',
line=dict(color='rgb(218, 165, 27)', width=1)
)
))
fig.update_layout(
title = "Biggest Flops",
autosize=False,
width=1300,
height=500,
yaxis=dict(
titlefont=dict(size=30),
)
)
# Change the bar mode
fig.update_layout(barmode='stack')
fig.update_yaxes(
type="category",
categoryorder="min descending")
fig.update_yaxes(automargin=True)
fig.show()
```
# Running dashboard
### From notebook
If you run all the cells in this notebook, the final cell will display the dashboard inline
### From JupyterLab
To run this from within JupyterLab: open a terminal and run:
```bash
panel serve /home/jovyan/project/examples/nyc-taxi-snowflake/dashboard.ipynb
```
The dashboard will be live behind the Jupyter proxy. You can copy the URL of this Jupyter window and replace `/lab/*` with `/proxy/5006/dashboard`. For example, your Jupyter URL might be:
```
https://main.demo.saturnenterprise.io/user/aaron/examples-cpu/lab/workspaces/examples-cpu
```
Then your dashboard URL would be:
```
https://main.demo.saturnenterprise.io/user/aaron/examples-cpu/proxy/5006/dashboard
```
It will take a few seconds to load when first viewing the page, as all the cells in this notebook must be executed first.
### Deployment
To run as part of a Deployment, use this for the Command (see readme for more details):
```bash
python -m panel serve /home/jovyan/project/examples/nyc-taxi-snowflake/dashboard.ipynb --port=8000 --address="0.0.0.0" --allow-websocket-origin="*"
```
# ML model predictions
The `MODEL_URL` environment variable must be set to be able to get predictions from a deployed model. Otherwise the widget on the "ML" tab will return -1.
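For example, you can point the dashboard at a deployed model by setting the variable before running the imports below (the URL here is a placeholder, not a real endpoint):
```
import os

# placeholder URL -- replace with the address of your deployed model
os.environ["MODEL_URL"] = "https://example.com/my-deployed-model"
```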
```
import os
import datetime as dt
import numpy as np
import hvplot.dask, hvplot.pandas
import holoviews as hv
from holoviews.streams import Selection1D
from bokeh.models import HoverTool
import panel as pn
import logging
logging.captureWarnings(True)
# URL to deployed model (see readme for more details)
MODEL_URL = os.environ.get('MODEL_URL', 'http://0.0.0.0:8000')
import s3fs
fs = s3fs.S3FileSystem(anon=True)
```
## Read in data
We'll start by reading in the shape file for the taxi zones provided by [NYC TLC](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page).
```
import zipfile
with fs.open('s3://nyc-tlc/misc/taxi_zones.zip') as f:
with zipfile.ZipFile(f) as zip_ref:
zip_ref.extractall(f'/tmp/taxi_zones')
import geopandas as gpd
zones = gpd.read_file('/tmp/taxi_zones/taxi_zones.shp').to_crs('epsg:4326')
```
Run the cell below to load the basemap tiles; uncomment the `zones.hvplot(geo=True)` line if you want to visualize the shape file directly.
```
# zones.hvplot(geo=True)
import geoviews as gv
basemap = gv.tile_sources.CartoLight()
```
### Query Snowflake
Next we'll aggregate data using Snowflake and pull the results into Pandas
```
import os
import snowflake.connector
SNOWFLAKE_ACCOUNT = os.environ['SNOWFLAKE_ACCOUNT']
SNOWFLAKE_USER = os.environ['SNOWFLAKE_USER']
SNOWFLAKE_PASSWORD = os.environ['SNOWFLAKE_PASSWORD']
SNOWFLAKE_WAREHOUSE = os.environ['SNOWFLAKE_WAREHOUSE']
TAXI_DATABASE = os.environ['TAXI_DATABASE']
TAXI_SCHEMA = os.environ['TAXI_SCHEMA']
conn_info = {
'account': SNOWFLAKE_ACCOUNT,
'user': SNOWFLAKE_USER,
'password': SNOWFLAKE_PASSWORD,
'warehouse': SNOWFLAKE_WAREHOUSE,
'database': TAXI_DATABASE,
'schema': TAXI_SCHEMA,
}
conn = snowflake.connector.connect(**conn_info)
conn.cursor().execute("""
CREATE OR REPLACE VIEW taxi_tip AS
SELECT
*,
HOUR(pickup_datetime) as pickup_hour,
HOUR(dropoff_datetime) as dropoff_hour,
DAYOFWEEKISO(pickup_datetime) - 1 as pickup_weekday, -- start week (Monday) at 0 to match pandas
DAYOFWEEKISO(dropoff_datetime) -1 as dropoff_weekday,
tip_amount / fare_amount * 100 as percent_tip
FROM taxi_yellow
WHERE
pickup_datetime BETWEEN '2017-01-01' AND '2019-12-31'
AND fare_amount > 0
AND tip_amount / fare_amount < 10
""")
def snowflake_query(query):
result = conn.cursor().execute(query).fetch_pandas_all()
result.columns = result.columns.str.lower()
return result
pickup_by_zone_and_time = snowflake_query("""
SELECT
pickup_taxizone_id,
pickup_hour,
pickup_weekday,
AVG(fare_amount) as average_fare,
COUNT(fare_amount) as total_rides,
SUM(fare_amount) as total_fare,
AVG(trip_distance) as average_trip_distance,
AVG(percent_tip) as average_percent_tip
FROM taxi_tip
GROUP BY
pickup_taxizone_id,
pickup_hour,
pickup_weekday
""")
pickup_by_zone_and_time.shape
pickup_by_zone_and_time.sort_values(['pickup_taxizone_id', 'pickup_hour', 'pickup_weekday']).head()
pickup_by_zone = snowflake_query("""
SELECT
pickup_taxizone_id,
AVG(fare_amount) as average_fare,
COUNT(fare_amount) as total_rides,
SUM(fare_amount) as total_fare,
AVG(trip_distance) as average_trip_distance,
AVG(percent_tip) as average_percent_tip
FROM taxi_tip
GROUP BY pickup_taxizone_id
""")
pickup_by_zone = pickup_by_zone.sort_values('pickup_taxizone_id').set_index('pickup_taxizone_id')
pickup_by_zone.shape
pickup_by_zone.head()
dropoff_by_zone = snowflake_query("""
SELECT
dropoff_taxizone_id,
AVG(fare_amount) as average_fare,
COUNT(fare_amount) as total_rides,
SUM(fare_amount) as total_fare,
AVG(trip_distance) as average_trip_distance,
AVG(percent_tip) as average_percent_tip
FROM taxi_tip
GROUP BY dropoff_taxizone_id
""")
dropoff_by_zone = dropoff_by_zone.sort_values('dropoff_taxizone_id').set_index('dropoff_taxizone_id')
dropoff_by_zone.shape
dropoff_by_zone.head()
zones_dict = dict(zip(zones.LocationID.tolist(), zones.zone.tolist()))
pickup_by_zone.index = pickup_by_zone.index.map(zones_dict)
dropoff_by_zone.index = dropoff_by_zone.index.map(zones_dict)
pickup_by_zone.head()
pickup_by_time = snowflake_query("""
SELECT
pickup_hour,
pickup_weekday,
AVG(fare_amount) as average_fare,
COUNT(fare_amount) as total_rides,
SUM(fare_amount) as total_fare,
AVG(trip_distance) as average_trip_distance,
AVG(percent_tip) as average_percent_tip
FROM taxi_tip
GROUP BY pickup_hour, pickup_weekday
""")
pickup_by_time.shape
pickup_by_time.sort_values(['pickup_hour', 'pickup_weekday']).head()
```
### Timeseries data
Next we'll read in the hourly timeseries data for the various fields
```
tip_timeseries = snowflake_query("""
SELECT
DATE_TRUNC('HOUR', pickup_datetime) as pickup_datetime,
AVG(percent_tip) as percent_tip
FROM taxi_tip
GROUP BY 1
""")
tip_timeseries = tip_timeseries.sort_values('pickup_datetime').set_index('pickup_datetime')
tip_timeseries.shape
tip_timeseries.sort_values('pickup_datetime').head()
conn.close()
```
## Construct visualizations
In this dashboard we'll have three tabs. We'll start with one about the volume of rides and aggregate fares, then move on to one about tips, and finish with a tab that consumes the machine learning model we've trained to predict the tip percentage.
### Volume tab
```
total_rides = pickup_by_zone.total_rides.sum()
total_fare = pickup_by_zone.total_fare.sum()
volume_intro = """
# Taxi Volume
Ridership by region and average fares for 2017-01-01 to 2020-01-01.
"""
logo_file = '/tmp/logo.svg'
fs.get("s3://saturn-public-data/nyc-taxi/data/dashboard/saturn_logo.svg", logo_file)
logo = pn.pane.SVG(logo_file, style={"float": "right"})
def kpi_box(title, color, value, unit=""):
if value > 1e9:
value /= 1e9
increment = "B"
elif value > 1e6:
value /= 1e6
increment = "M"
elif value > 1e3:
value /= 1e3
increment = "K"
else:
increment = ""
return pn.pane.Markdown(
f"""
### {title}
# {unit}{value :.02f} {increment}
""",
style={'background-color': '#F6F6F6', 'border': '2px solid black',
'border-radius': '5px', 'padding': '10px', 'color': color},
)
fares = kpi_box("Total Fares", "#10874a", total_fare, "$")
rides = kpi_box("Total Rides", "#7a41ba", total_rides)
average = kpi_box("Average Fare", "coral", (total_fare / total_rides), "$")
data = zones.join(pickup_by_zone[["total_rides", "average_fare"]], on="zone")
data["million_rides"] = data.total_rides/1e6
tooltips = [
('Total Rides', '@total_rides{(0,0.00 a)}'),
('Average Fare', '@{average_fare}{($0.00 a)}'),
('Zone', '@zone'),
('Borough', '@borough'),
]
hover = HoverTool(tooltips=tooltips)
pickup_map = data.hvplot(
x="longitude", y="latitude", c="million_rides",
geo=True, max_width=600, max_height=600,
alpha=0.6, cmap="viridis", clim=(0, np.ceil(pickup_by_zone.total_rides.max() / 1e6)),
hover_cols=["zone", "borough", "average_fare", "total_rides"],
title=f"Rides by pickup location (in Millions)",
responsive=True, colorbar=True,
xaxis=None, yaxis=None, selection_alpha=1).opts(tools=["tap", hover], toolbar="above")
toggle = pn.widgets.RadioButtonGroup(options=["Pickup", "Dropoff"], value="Pickup")
@pn.depends(value=toggle)
def volume_table(value):
data = pickup_by_zone if value == "Pickup" else dropoff_by_zone
subset = data.total_rides.sort_values(ascending=False)
subset = subset.loc[subset.index.dropna()]
return pn.Column(
f"### Top/Bottom 5 {value} Zones",
pn.pane.DataFrame(subset.head(5), index_names=False),
pn.Spacer(height=10),
pn.pane.DataFrame(subset.tail(5), index_names=False, header=False),
width_policy="fit"
)
data = pickup_by_zone_and_time.copy()
data.index = data.index.map(zones_dict)
def heatmap(C, data=data, **kwargs):
return data.hvplot.heatmap(
x="pickup_weekday",
y="pickup_hour",
C=C,
hover_cols=["total_rides"] if C == "average_fare" else ["average_fare"],
xticks=[(0, 'Mon'), (1, 'Tues'), (2, 'Wed'), (3, 'Thur'), (4, 'Fri'), (5, 'Sat'), (6, 'Sun')],
responsive=True, min_height=500, colorbar=False, **kwargs
).opts(toolbar=None, xrotation=90, padding=0)
rides_dmap = heatmap(C="total_rides", groupby="pickup_taxizone_id", cmap="reds")
fare_dmap = heatmap(C="average_fare", groupby="pickup_taxizone_id", cmap="blues")
rides_summary = heatmap(data=pickup_by_time, C="total_rides", cmap="reds", title="Total Rides")
fare_summary = heatmap(data=pickup_by_time, C="average_fare", cmap="blues", title="Average Fare")
volume_heatmap = pn.pane.HoloViews(rides_summary)
def ride_or_fares_plot(zone, value):
if value == ["Rides"]:
if zone is None:
obj = rides_summary.opts(alpha=1)
else:
obj = rides_dmap[zone].opts(title=f"{zone} Rides").opts(alpha=1)
elif value == ["Fares"]:
if zone is None:
obj = fare_summary
else:
obj = fare_dmap[zone].opts(title=f"{zone} Fares")
else:
if zone is None:
obj = (fare_summary * rides_summary.opts(alpha=0.5, padding=0)).opts(title="Total Rides/Fares")
else:
obj = (fare_dmap[zone] * rides_dmap[zone].opts(alpha=0.5, padding=0)).opts(title=f"{zone}")
return obj
def on_pickup_tap(index):
if index:
zone = zones.loc[index, "zone"].item()
value = rides_or_fares.value
volume_heatmap.object = ride_or_fares_plot(zone, value)
return
volume_stream = Selection1D(source=pickup_map)
volume_stream.param.watch_values(on_pickup_tap, ['index']);
rides_or_fares = pn.widgets.CheckButtonGroup(options=["Rides", "Fares"], value=["Rides"])
def on_rides_or_fares(target, event):
index = volume_stream.index
value = event.new
if index and value:
zone = zones.loc[index, "zone"].item()
volume_heatmap.object = ride_or_fares_plot(zone, value)
rides_or_fares.link(volume_heatmap, callbacks={"value": on_rides_or_fares})
def on_reset_heatmap(*args):
value = rides_or_fares.value
volume_heatmap.object = ride_or_fares_plot(None, value)
reset_heatmap = pn.widgets.Button(name="Reset")
reset_heatmap.on_click(on_reset_heatmap)
volume = pn.GridSpec(name="Volume", sizing_mode='stretch_both', min_width=800, min_height=600, max_height=800)
volume[0, :6] = volume_intro
volume[0, 6] = logo
volume[1, 0] = fares
volume[1, 1] = rides
volume[1, 2] = average
volume[1:4, 4:6] = pn.Column(toggle, volume_table)
volume[1:8, 3] = pn.Column(
pn.pane.Markdown("*Choose rides, fares, or both and select a zone on the map.*", margin=(0, 10)),
rides_or_fares, reset_heatmap, volume_heatmap)
volume[2:8, 0:3] = pickup_map * gv.tile_sources.CartoLight()
```
### Tip tab
```
tip_intro = """
# Analysis of Tips
Tips vary based on time of day, location and many other factors.
"""
tip_heatmap = heatmap(data=pickup_by_time, C="average_percent_tip", cmap="coolwarm", clim=(12, 18), title="Average Tip %")
date_range_slider = pn.widgets.DateRangeSlider(
name='Show between',
start=tip_timeseries.index[0], end=tip_timeseries.index[-1],
value=(tip_timeseries.index.min(), tip_timeseries.index.max())
)
discrete_slider = pn.widgets.DiscreteSlider(name='Rolling window', options=['1H', '2H', '4H', '6H', '12H', '1D', '2D', '7D', '14D', '1M'], value='1D')
def tip_plot(xlim, window):
data = tip_timeseries.rolling(window).mean()
return data.hvplot(y="percent_tip", xlim=xlim, ylim=(10, 18), responsive=True, min_height=200).opts(toolbar="above")
tip_timeseries_plot = pn.pane.HoloViews(tip_plot(date_range_slider.value, discrete_slider.value))
def trim(target, event):
target.object = tip_plot(event.new, discrete_slider.value)
def roll(target, event):
target.object = tip_plot(date_range_slider.value, event.new)
discrete_slider.link(tip_timeseries_plot, callbacks={"value": roll})
date_range_slider.link(tip_timeseries_plot, callbacks={"value": trim})
joined = zones.join(pickup_by_zone, on="zone")
tip_map = joined.hvplot(c="average_percent_tip", geo=True, alpha=0.6, cmap="coolwarm",
hover_cols=["zone", "borough"], title="Average Tip %",
clim=(0, 20),responsive=True, colorbar=False,
xaxis=None, yaxis=None).opts(toolbar="above")
tip_table = pickup_by_zone.average_percent_tip.sort_values(ascending=False)
tip_table = tip_table.loc[tip_table.index.dropna()]
tip_pane = pn.Column(
"### Top/Bottom 5 Tip Zones",
pn.pane.DataFrame(tip_table.head(5), header=False, index_names=False),
pn.Spacer(height=10),
pn.pane.DataFrame(tip_table.tail(5), header=False, index_names=False),
)
tips = pn.GridSpec(name="Tips", sizing_mode='stretch_both', min_width=800, min_height=600, max_height=800)
tips[0, :6] = tip_intro
tips[0, 6] = logo
tips[1:5, 0:2] = tip_map * gv.tile_sources.CartoLight()
tips[1:5, 2:4] = tip_pane
tips[1:5, 4:6] = tip_heatmap
tips[5:8, 0:2] = pn.Column(date_range_slider, discrete_slider, "*Use the widgets to control the rolling window average on the timeseries plot and to restrict it to a chosen date range*")
tips[5:8, 2:6] = tip_timeseries_plot
```
### ML tab
```
ml_intro = """
# Machine Learning
Predict percent tip by consuming a deployed model. Must set MODEL_URL environment variable in Project or Deployment, otherwise the prediction will be -1 (see readme for more details).
"""
import requests
def tip_prediction(pickup_taxizone_id, dropoff_taxizone_id, datetime, passenger_count):
try:
SCORING_ENDPOINT = f"{MODEL_URL}/api/predict"
SATURN_TOKEN = os.environ["SATURN_TOKEN"]
result = requests.post(
url=SCORING_ENDPOINT,
json={
"passenger_count": passenger_count,
"tpep_pickup_datetime": str(datetime),
"pickup_taxizone_id": int(pickup_taxizone_id),
"dropoff_taxizone_id": int(dropoff_taxizone_id)
},
headers={
"Content-Type": "application/json",
"Authorization": f"token {SATURN_TOKEN}"
}
)
return float(result.json()["prediction"]) * 100
except:
return -1
options = {"Choose from map": -1, **{v: k for k, v in zones.zone.to_dict().items()}}
pickup = pn.widgets.Select(name="Pickup", options=options)
dropoff = pn.widgets.Select(name="Dropoff", options=options)
passengers = pn.widgets.IntSlider(name='Passengers', start=0, end=10, step=1, value=2)
plot = zones.hvplot(geo=True, c='zone', legend=False, width=500, height=500, xaxis=None, yaxis=None, alpha=.2, selection_alpha=1).opts(tools=['tap', 'hover'])
def on_map_select(index):
if index and pickup.value == -1:
pickup.value = index[0]
elif index and dropoff.value == -1:
dropoff.value = index[0]
return
stream = Selection1D(source=plot)
stream.param.watch_values(on_map_select, ['index'])
overlay = pn.pane.HoloViews(plot * gv.tile_sources.CartoLight())
def on_reset(*args):
pickup.value = -1
dropoff.value = -1
passengers.value = 2
date.value = dt.datetime.now().date()
hour.value = 0
text.background = "#ffffff"
text.object = None
stream.update(index=[])
overlay.object = plot * gv.tile_sources.CartoLight()
reset = pn.widgets.Button(name="Reset", width=80)
reset.on_click(on_reset)
date = pn.widgets.DatePicker(name="Date", value=dt.datetime.now().date())
hour = pn.widgets.DiscreteSlider(
name="Hour",
options=dict(zip(
["12am", *[f"{h}am"for h in range(1, 12)] ,"12pm", *[f"{h}pm"for h in range(1, 12)]],
list(range(24))
)))
submit = pn.widgets.Button(name="Predict my tip", button_type='primary', width=200)
text = pn.pane.Markdown(width=200, height=45, style={"padding-left": "10pt"})
helper = pn.pane.Markdown(width=300)
def b(event):
if pickup.value == -1 or dropoff.value == -1:
submit.button_type = "danger"
helper.object = "*You must select pickup and dropoff zone*"
return
submit.button_type = "primary"
helper.object = None
datetime = dt.datetime.combine(date.value, dt.time(hour=hour.value))
prediction = tip_prediction(pickup.value, dropoff.value, datetime, passengers.value)
subset = zones.iloc[[pickup.value, dropoff.value]]
trip = gv.Path((subset.geometry.centroid.x, subset.geometry.centroid.y)).opts(color="black", line_width=2)
obj = plot * gv.tile_sources.CartoLight() * subset.hvplot(geo=True) * trip
obj.label = f"{subset.zone.tolist()[0]} to {subset.zone.tolist()[1]}"
overlay.object = obj
text.background = "yellow"
text.object = f"## Prediction: {prediction: .2f}%"
submit.on_click(b)
predict = pn.Row(
pn.Column(
"## Predict my Tip",
pickup,
dropoff,
passengers,
date,
hour,
pn.Row(submit, reset),
helper,
text,
),
overlay
)
ml = pn.GridSpec(name="ML", sizing_mode='stretch_both', min_width=800, min_height=600, max_height=800)
ml[0, :6] = ml_intro
ml[0, 6] = logo
ml[2:8, :6] = predict
```
## Final Dashboard
```
pn.Tabs(volume, tips, ml, tabs_location="left").servable(title="Saturn Taxi")
```
# Allocation Agent: Monte Carlo Tree Search (MCTS)
A full review of MCTS is available in [Browne et al., 2012](http://ccg.doc.gold.ac.uk/ccg_old/papers/browne_tciaig12_1.pdf), which forms the basis of the work below.
Broadly speaking MCTS randomly explores a decision space, building a tree of connected moves and their associated rewards. Over multiple iterations the MCTS determines which decisions are most likely to result in a positive outcome. Ultimately the best decision is the one that provides the best long term reward, the definition of which depends on the specific domain. For example, when creating a MCTS agent to play a board game, the long term reward would be a 0 or 1 depending on whether you won or lost the game after making the current move.
In the context of bed allocation we do not have a natural end state. Therefore the long term reward is determined as the total reward incurred after N time steps have passed, according to the equation below. Here $R_{n}$ represents the reward associated with the state of the hospital at a given time step, and $\gamma \in [0, 1]$ is the discount factor. The reward associated with a hospital state is $1 - \text{total penalties}$ incurred. The first term in the equation, $R_{1}$, is the immediate reward associated with the current allocation, i.e. the greedy allocation score, and the subsequent terms are rewards associated with future states, where the discount factor weighs the relative importance of these future states against the current state.
$$
\begin{align}
R = R_{1} + \gamma R_{2} + \gamma^{2}R_{3} + \dots + \gamma^{N-1}R_{N}
\end{align}
$$
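As a small numerical illustration of this equation (the per-step rewards and discount factor below are made up for the example), the discounted return can be computed as:
```
# illustrative per-step rewards (1 - total penalties at each step) and discount factor
rewards = [0.9, 0.7, 0.8, 0.6]
gamma = 0.9

# R = R1 + gamma*R2 + gamma^2*R3 + ... + gamma^(N-1)*RN
R = sum(gamma ** i * r for i, r in enumerate(rewards))
print(round(R, 3))  # 2.615
```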
In this notebook we demonstrate how to run the MCTS allocation algorithm in a simplified virtual hospital; see `src/agent/mcts` and `src/agent/simulation` for the relevant code. More details of the implementation are provided at the end of this notebook.
## 1. Import required modules
_Note:_ you will need to first install the module as per the instructions in the main README, and run a notebook server from within the same virtual environment to have access to the `hospital` submodules.
```
import cloudpickle
import copy
import time
import random
import warnings
warnings.filterwarnings('ignore')
from tqdm.notebook import tqdm
import agent.utils as utils
import agent.policy as policy
import agent.run_mcts as mcts
from hospital.people import Patient
from hospital.building import Hospital, MedicalWard, SurgicalWard, Room
from hospital.equipment.bed import Bed
import hospital.restrictions.ward as R
```
## 2. Hospital Environment
The MCTS implementation can take a long time to run and has high memory requirements. To demonstrate how it can be run, we create a simplified scenario with a hospital containing 2 wards (1 medical, 1 surgical) of 5 beds each. The medical ward has a restriction disallowing surgical patients, and the surgical ward has one disallowing medical patients.
```
beds = [Bed(name=f"B00{i}") for i in range(10)]
wards = [
MedicalWard(name="MedicalWard", rooms=[Room(name="R000", beds=beds[:5])]),
SurgicalWard(name="SurgicalWard", rooms=[Room(name="R001", beds=beds[5:])]),
]
h = Hospital(name="Hospital", wards=wards)
# Add ward restrictions
h.wards[0].restrictions = [R.NoSurgical(10)]
h.wards[1].restrictions = [R.NoMedical(5)]
# Populate at 50%
policy.populate_hospital(h, occupancy=0.5)
h.render()
# you will also need to normalise the penalties to lie between 0 and 1
utils.normalise_ward_penalties(h)
```
## 3. Create patients to admit to the hospital
To generate realistic patients, utilise the `PatientSampler` class, which takes a day of the week and an hour of the day and returns a forecast for the number of patients estimated to arrive each hour, `N`. By default, the patients are synthesised with random data where the distribution of each attribute is informed by aggregated historic data. If historic patient data is available, a pool of historic patients can be saved, and setting `historic=True` will return more accurate marginal distributions across all patient attributes. Otherwise, the returned number of patients `N` is a random number, and the returned patient attributes are randomly generated. See `src/forecasting/patient_sampler`.
```
# assumed import path, based on src/forecasting/patient_sampler referenced above
from forecasting.patient_sampler import PatientSampler

sampler = PatientSampler("monday", 9)
forecast_window = 2
forecasted_patients = sampler.sample_patients(forecast_window=forecast_window, num_samples=1)
# we can unpack the above structure into a list of lists
# each sublist represents an hour of patients
arrivals = []
for _, patients in forecasted_patients[0].items():
arrivals.append(patients)
```
We will instead use the code below to create a simplified list of arriving patients, by initialising patients with the default value for most fields, such that there won't be any additional patient level restrictions (e.g., patient needs side room for immunosuppression). The arrivals list will still have the same structure as above.
```
def generate_random_name() -> str:
"""
2 random letters + 2 random numbers. As a string.
"""
letters = "abcdefghijklmnopqrstuvwxyz"
digits = "0123456789"
characters = random.choices(letters, k=2) + random.choices(digits, k=2)
name = "".join(characters)
return name
def generate_simple_patient() -> Patient:
"""Returns a random patient, without any patient level restrictions."""
# department and specialty
department = ["medicine", "surgery"][utils.bernoulli(0.5)]
# Patient, all other attributes are default=False
patient = Patient(
name=generate_random_name(),
sex=["male", "female"][utils.bernoulli(0.5)],
department=department
)
return patient
def generate_arrivals(max_per_hour: int, forecast_window: int) -> list:
"""Creates a list of arriving patients for each hour of in the forecast window"""
arrivals =[]
for hours in range(forecast_window):
arrivals.append([generate_simple_patient() for _ in range(random.randint(0, max_per_hour))])
return arrivals
# patient currently being allocated
patient = Patient(
name="patient_0",
sex="female",
department="medicine",
)
# 'forecasted' arrivals
arrivals = generate_arrivals(3, 4)
print(f"Incoming patients per hour: {[len(l) for l in arrivals]}")
# now we insert the patient we are currently trying to
# allocate as the first entry in the arrivals queue, at time t=0.
arrivals = [[patient]] + arrivals
```
Now we can run the MCTS as below. The output in `mcts_output` is a list of dictionaries where each dictionary represents a possible allocation for patient_0, and the results of the tree search for that option, further details of the tree search are provided below. The items of each dictionary are:
- `action`, the allocation of the current patient into a bed. It is possible to consider allocating multiple patients in a timestep; as such, the action is represented by a dictionary of bed_name:patient pairs, one for each patient at t=0 in the tree search. In our example there is just one patient.
- `score`, the immediate penalty associated with allocating the current patient(s) to the suggested bed(s).
- `violated_restrictions`, the set of restrictions (if any) violated by assigning the patient(s) to the suggested bed(s).
- `ucb_score`, the tree policy score associated with the suggested allocation. See below for more details on UCB score.
- `visit_count`, the number of times the node representing the suggested allocation was visited during tree search.
There are several potential strategies for determining the best allocation option. Choosing the option with the lowest `score` is equivalent to a greedy optimisation that selects the best bed given the current circumstances of the hospital, whereas choosing the highest `ucb_score`, the highest `visit_count`, or a weighted combination of the two gives the allocation recommended by the MCTS (a minimal example of selecting an option follows the code below).
```
h_copy = copy.deepcopy(h)
t = time.time()
mcts_node = mcts.run_mcts(
h_copy,
arrivals,
discount_factor=0.9,
n_iterations=100,
)
elapsed = time.time() - t
mcts_output = mcts.construct_mcts_output(h_copy, mcts_node, patient)
print(f"Time taken to compute suggestions: {round(elapsed,2)}s")
mcts_output
```
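As a minimal sketch (using only the fields of `mcts_output` listed above), the recommended and greedy options can be picked out like this:
```
# MCTS recommendation: the allocation whose node was visited most often
best_by_visits = max(mcts_output, key=lambda option: option["visit_count"])

# greedy alternative: the allocation with the lowest immediate penalty
best_greedy = min(mcts_output, key=lambda option: option["score"])

print("MCTS suggestion:", best_by_visits["action"])
print("Greedy suggestion:", best_greedy["action"])
```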
## 4. Details on implementation
Below we describe the four stages of the MCTS algorithm as they are specifically implemented for the bed allocation agent. The implementation uses the `anytree` library to build a tree structure, where each node represents a specific state of the hospital and each level of the tree represents a time step. Time steps are incremented in hours and connected to the number of forecasted admissions arriving each hour. The input to the tree search is a queue of patients arriving at each time step, with the current patient to be allocated (`t=0`) as the first entry in this queue, and the current state of the hospital as the root node to search from.
<ol>
<li><b>Selection</b></li>
Starting at the root node a child node is selected. The root node represents the current state of the hospital at time $t=0$ where the state is defined by the patients that are currently occupying beds and the set of empty beds.
In the first iteration of the tree search, the algorithm selects the root node and moves to the expansion step. In subsequent iterations it will traverse from the root and choose one of the child nodes according to the tree policy. The <i>tree policy</i> is the UCB score, in which $\overline{R}$ is the mean reward from visiting that node, $N_{p}$ is the number of times its parent node was visited and $N_{i}$ is the number of times the node itself has been visited. The first term encourages the algorithm to select nodes that have previously resulted in good outcomes, while the second encourages the algorithm to explore options that it hasn't visited as often. A minimal sketch of this selection rule is given after this list.
$$
\begin{align}
UCB = \overline{R} + \sqrt{\frac{2 \log{N_{p}}}{N_{i}}}
\end{align}
$$
<li><b>Expansion</b></li>
Once a node is selected, a child node is attached to represent each possible decision state for that time step.
For example, suppose we have a hospital with 4 beds: 2 are occupied and 2 are available, and we are currently trying to allocate a patient P1. As there are two possible decisions, two nodes can be attached that represent 1) allocating P1 to the first available bed and 2) allocating P1 to the second available bed.
As we progress through the tree search, we may encounter time steps where multiple patients have arrived. In such cases, a node is expanded for each possible combination of patients to available beds.
<li><b>Simulation</b></li>
From one of the attached children we then simulate a future. The simulation stage involves the following steps:
<ul>
<li>Each patient currently within the hospital has a length of stay attribute, and an expected length of stay attribute. At the start of the simulation step, the length of stay counters are incremented by one.</li>
<li>Then a discharge model is applied to discharge existing patients. The probability of a patient being discharged increases as they approach their expected length of stay.</li>
<li>The patients arriving in the given time-step are then assigned to beds according to the default policy. The default policy is a random assignment of patients to the available beds.</li>
</ul>
<li><b>Backpropagation</b></li>
The total penalty of the hospital is calculated after the simulation step. This is the sum of all penalties for each broken restriction within the hospital. We then backpropagate this score up the tree to distribute the outcome across all decisions along the currently explored path.
This stage updates the UCB score (tree policy score) and visit count for each node that was traversed along the current decision pathway. If the result of the simulation was good, the UCB scores of each node will have increased, making it more likely that future iterations of the tree search will select these nodes again. The reverse is also true. In this manner MCTS is more tractable than a completely random search of the possible decision pathways as it more frequently visits the most promising options during the selection stage.
</ol>
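A minimal sketch of the selection step described above (the node representation and attribute names here are illustrative, not the actual `anytree` implementation):
```
import math

def ucb_score(mean_reward, parent_visits, node_visits):
    """UCB tree policy: mean reward plus an exploration bonus."""
    if node_visits == 0:
        return float("inf")  # always try unvisited children first
    return mean_reward + math.sqrt(2 * math.log(parent_visits) / node_visits)

def select_child(children, parent_visits):
    """Pick the child node with the highest UCB score."""
    return max(
        children,
        key=lambda child: ucb_score(child["mean_reward"], parent_visits, child["visits"]),
    )
```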
The above procedure is repeated multiple times until a maximum number of iterations have been reached. At this point the tree object is returned and the best child node of the root is selected as the optimal allocation for the current patient. There are several potential strategies for determining what the best node is. In the current implementation we choose the node that has the highest visit count. Alternative approaches such as choosing the node with the highest UCB score or some balance of the two, and how this affects outcomes, remain to be explored in future work.
### Limitations
In the above implementation we take a single sample from the demand forecast and use this as a fixed version of the future. This means that the future within the tree search is deterministic, which significantly reduces the search space and the branching of the tree, allowing the algorithm to find a recommendation in a more tractable timeframe. However, a single sample from the forecast represents only one of the possible futures. To truly capture the variability of incoming patients, we envisage a strategy where multiple simultaneous tree searches are implemented, each using a separate sample from the forecasted admissions. These could be run in parallel to increase runtime efficiency, and the final suggested allocation would be the bed that has the highest average ranking across the ensemble of tree searches. The efficacy of this strategy, and of alternative approaches to dealing with non-deterministic search spaces, remains to be explored.
Despite fixing the set of arrivals within a tree-search we can still experience an intractable amount of branching that makes the current implementation of MCTS unsuitable for operational use. For example, if there are just 4 empty beds in the hospital, and 9 arriving patients within a time step, the tree expands into 840 possible permutations of patients to beds. With multiple time steps into the future this can compound and result in either memory issues or extremely long compute times. Further work is needed to explore engineering strategies that can make MCTS more operationally feasible.
# The Sequential model
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2020/04/12<br>
**Last modified:** 2020/04/12<br>
**Description:** Complete guide to the Sequential model.
## Setup
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
```
## When to use a Sequential model
A `Sequential` model is appropriate for **a plain stack of layers**
where each layer has **exactly one input tensor and one output tensor**.
Schematically, the following `Sequential` model:
```
# Define Sequential model with 3 layers
model = keras.Sequential(
[
layers.Dense(2, activation="relu", name="layer1"),
layers.Dense(3, activation="relu", name="layer2"),
layers.Dense(4, name="layer3"),
]
)
# Call model on a test input
x = tf.ones((3, 3))
y = model(x)
```
is equivalent to this function:
```
# Create 3 layers
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")
# Call layers on a test input
x = tf.ones((3, 3))
y = layer3(layer2(layer1(x)))
```
A Sequential model is **not appropriate** when (see the short sketch after this list):
- Your model has multiple inputs or multiple outputs
- Any of your layers has multiple inputs or multiple outputs
- You need to do layer sharing
- You want non-linear topology (e.g. a residual connection, a multi-branch
model)
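In those cases, reach for the Functional API instead. A minimal sketch of a two-input model (purely illustrative) that cannot be expressed as a `Sequential` model:
```
# Two inputs merged into one output -- not expressible as a plain stack of layers
input_a = keras.Input(shape=(32,), name="input_a")
input_b = keras.Input(shape=(16,), name="input_b")
merged = layers.concatenate(
    [layers.Dense(8, activation="relu")(input_a),
     layers.Dense(8, activation="relu")(input_b)]
)
outputs = layers.Dense(1)(merged)
multi_input_model = keras.Model(inputs=[input_a, input_b], outputs=outputs)
```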
## Creating a Sequential model
You can create a Sequential model by passing a list of layers to the Sequential
constructor:
```
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
)
```
Its layers are accessible via the `layers` attribute:
```
model.layers
```
You can also create a Sequential model incrementally via the `add()` method:
```
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu"))
model.add(layers.Dense(3, activation="relu"))
model.add(layers.Dense(4))
```
Note that there's also a corresponding `pop()` method to remove layers:
a Sequential model behaves very much like a list of layers.
```
model.pop()
print(len(model.layers)) # 2
```
Also note that the Sequential constructor accepts a `name` argument, just like
any layer or model in Keras. This is useful to annotate TensorBoard graphs
with semantically meaningful names.
```
model = keras.Sequential(name="my_sequential")
model.add(layers.Dense(2, activation="relu", name="layer1"))
model.add(layers.Dense(3, activation="relu", name="layer2"))
model.add(layers.Dense(4, name="layer3"))
```
## Specifying the input shape in advance
Generally, all layers in Keras need to know the shape of their inputs
in order to be able to create their weights. So when you create a layer like
this, initially, it has no weights:
```
layer = layers.Dense(3)
layer.weights # Empty
```
It creates its weights the first time it is called on an input, since the shape
of the weights depends on the shape of the inputs:
```
# Call layer on a test input
x = tf.ones((1, 4))
y = layer(x)
layer.weights # Now it has weights, of shape (4, 3) and (3,)
```
Naturally, this also applies to Sequential models. When you instantiate a
Sequential model without an input shape, it isn't "built": it has no weights
(and calling
`model.weights` results in an error stating just this). The weights are created
when the model first sees some input data:
```
model = keras.Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(3, activation="relu"),
layers.Dense(4),
]
) # No weights at this stage!
# At this point, you can't do this:
# model.weights
# You also can't do this:
# model.summary()
# Call the model on a test input
x = tf.ones((1, 4))
y = model(x)
print("Number of weights after calling the model:", len(model.weights)) # 6
```
Once a model is "built", you can call its `summary()` method to display its
contents:
```
model.summary()
```
However, it can be very useful when building a Sequential model incrementally
to be able to display the summary of the model so far, including the current
output shape. In this case, you should start your model by passing an `Input`
object to your model, so that it knows its input shape from the start:
```
model = keras.Sequential()
model.add(keras.Input(shape=(4,)))
model.add(layers.Dense(2, activation="relu"))
model.summary()
```
Note that the `Input` object is not displayed as part of `model.layers`, since
it isn't a layer:
```
model.layers
```
A simple alternative is to just pass an `input_shape` argument to your first
layer:
```
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu", input_shape=(4,)))
model.summary()
```
Models built with a predefined input shape like this always have weights (even
before seeing any data) and always have a defined output shape.
In general, it's a recommended best practice to always specify the input shape
of a Sequential model in advance if you know what it is.
## A common debugging workflow: `add()` + `summary()`
When building a new Sequential architecture, it's useful to incrementally stack
layers with `add()` and frequently print model summaries. For instance, this
enables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers is
downsampling image feature maps:
```
model = keras.Sequential()
model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images
model.add(layers.Conv2D(32, 5, strides=2, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
# Can you guess what the current output shape is at this point? Probably not.
# Let's just print it:
model.summary()
# The answer was: (40, 40, 32), so we can keep downsampling...
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(2))
# And now?
model.summary()
# Now that we have 4x4 feature maps, time to apply global max pooling.
model.add(layers.GlobalMaxPooling2D())
# Finally, we add a classification layer.
model.add(layers.Dense(10))
```
Very practical, right?
## What to do once you have a model
Once your model architecture is ready, you will want to:
- Train your model, evaluate it, and run inference (a minimal sketch follows this list). See our
[guide to training & evaluation with the built-in loops](
/guides/training_with_built_in_methods/)
- Save your model to disk and restore it. See our
[guide to serialization & saving](/guides/serialization_and_saving/).
- Speed up model training by leveraging multiple GPUs. See our
[guide to multi-GPU and distributed training](https://keras.io/guides/distributed_training/).
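As a quick orientation for the first item, here is a minimal training sketch on random dummy data; the optimizer, loss, and dataset below are illustrative placeholders rather than recommendations from this guide:
```
# Minimal sketch: compile, fit on dummy data, then save (illustration only).
model = keras.Sequential([layers.Dense(4, activation="relu"), layers.Dense(1)])
model.compile(optimizer="adam", loss="mse")
x_dummy = tf.random.normal((32, 8))  # 32 samples with 8 features each
y_dummy = tf.random.normal((32, 1))
model.fit(x_dummy, y_dummy, epochs=2, verbose=0)
model.save("my_model")  # writes a SavedModel directory; see the serialization guide for details
```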
## Feature extraction with a Sequential model
Once a Sequential model has been built, it behaves like a [Functional API
model](/guides/functional_api/). This means that every layer has an `input`
and `output` attribute. These attributes can be used to do neat things, like
quickly
creating a model that extracts the outputs of all intermediate layers in a
Sequential model:
```
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=[layer.output for layer in initial_model.layers],
)
# Call feature extractor on test input.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
for _ in features:
print(_.shape)
```
Here's a similar example that only extracts features from one layer:
```
initial_model = keras.Sequential(
[
keras.Input(shape=(250, 250, 3)),
layers.Conv2D(32, 5, strides=2, activation="relu"),
layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
layers.Conv2D(32, 3, activation="relu"),
]
)
feature_extractor = keras.Model(
inputs=initial_model.inputs,
outputs=initial_model.get_layer(name="my_intermediate_layer").output,
)
# Call feature extractor on test input.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
for _ in features:
print(_.shape)
```
## Transfer learning with a Sequential model
Transfer learning consists of freezing the bottom layers in a model and only training
the top layers. If you aren't familiar with it, make sure to read our [guide
to transfer learning](/guides/transfer_learning/).
Here are two common transfer learning blueprints involving Sequential models.
First, let's say that you have a Sequential model, and you want to freeze all
layers except the last one. In this case, you would simply iterate over
`model.layers` and set `layer.trainable = False` on each layer, except the
last one. Like this:
```python
model = keras.Sequential([
keras.Input(shape=(784)),
layers.Dense(32, activation='relu'),
layers.Dense(32, activation='relu'),
layers.Dense(32, activation='relu'),
layers.Dense(10),
])
# Presumably you would want to first load pre-trained weights.
model.load_weights(...)
# Freeze all layers except the last one.
for layer in model.layers[:-1]:
layer.trainable = False
# Recompile and train (this will only update the weights of the last layer).
model.compile(...)
model.fit(...)
```
Another common blueprint is to use a Sequential model to stack a pre-trained
model and some freshly initialized classification layers. Like this:
```python
# Load a convolutional base with pre-trained weights
base_model = keras.applications.Xception(
weights='imagenet',
include_top=False,
pooling='avg')
# Freeze the base model
base_model.trainable = False
# Use a Sequential model to add a trainable classifier on top
model = keras.Sequential([
base_model,
layers.Dense(1000),
])
# Compile & train
model.compile(...)
model.fit(...)
```
If you do transfer learning, you will probably find yourself frequently using
these two patterns.
That's about all you need to know about Sequential models!
To find out more about building models in Keras, see:
- [Guide to the Functional API](/guides/functional_api/)
- [Guide to making new Layers & Models via subclassing](
/guides/making_new_layers_and_models_via_subclassing/)
# Ex1 - Filtering and Sorting Data
This time we are going to pull data directly from the internet.
Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
### Step 1. Import the necessary libraries
```
import pandas as pd
```
### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv).
### Step 3. Assign it to a variable called chipo.
```
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv'
chipo = pd.read_csv(url, sep = '\t')
```
### Step 4. How many products cost more than $10.00?
```
# clean the item_price column and transform it into a float
prices = [float(value[1 : -1]) for value in chipo.item_price]
# reassign the column with the cleaned prices
chipo.item_price = prices
# delete the duplicates in item_name and quantity
chipo_filtered = chipo.drop_duplicates(['item_name','quantity','choice_description'])
# chipo_filtered
# select only the products with quantity equals to 1
chipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]
chipo_one_prod
# chipo_one_prod[chipo_one_prod['item_price']>10].item_name.nunique()
# chipo_one_prod[chipo_one_prod['item_price']>10]
chipo_one_prod.query('item_price > 10').item_name.nunique()
```
### Step 5. What is the price of each item?
###### Print a data frame with only two columns: item_name and item_price
```
# delete the duplicates in item_name and quantity
# chipo_filtered = chipo.drop_duplicates(['item_name','quantity'])
chipo[(chipo['item_name'] == 'Chicken Bowl') & (chipo['quantity'] == 1)]
# select only the products with quantity equals to 1
# chipo_one_prod = chipo_filtered[chipo_filtered.quantity == 1]
# select only the item_name and item_price columns
# price_per_item = chipo_one_prod[['item_name', 'item_price']]
# sort the values from most to least expensive
# price_per_item.sort_values(by = "item_price", ascending = False).head(20)
```
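For reference, here is a short sketch along the lines of the commented-out approach above that produces the requested two-column frame; it assumes the `item_price` cleaning from Step 4 has already been run:
```
# Sketch: one row per product with its single-quantity price, most expensive first.
price_per_item = (
    chipo[chipo.quantity == 1]
    .drop_duplicates('item_name')[['item_name', 'item_price']]
    .sort_values(by='item_price', ascending=False)
)
price_per_item.head(20)
```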
### Step 6. Sort by the name of the item
```
chipo.item_name.sort_values()
# OR
chipo.sort_values(by = "item_name")
```
### Step 7. What was the quantity of the most expensive item ordered?
```
chipo.sort_values(by = "item_price", ascending = False).head(1)
```
### Step 8. How many times was a Veggie Salad Bowl ordered?
```
chipo_salad = chipo[chipo.item_name == "Veggie Salad Bowl"]
len(chipo_salad)
```
### Step 9. How many times did someone order more than one Canned Soda?
```
chipo_drink_steak_bowl = chipo[(chipo.item_name == "Canned Soda") & (chipo.quantity > 1)]
len(chipo_drink_steak_bowl)
```
# Description
Generates Manubot tables of the pathways enriched for a given LV (from the MultiPLIER models); the LV name is set in the Settings section below.
# Modules loading
```
%load_ext autoreload
%autoreload 2
import re
from pathlib import Path
import pandas as pd
from entity import Trait
import conf
```
# Settings
```
LV_NAME = "LV5"
assert (
conf.MANUSCRIPT["BASE_DIR"] is not None
), "The manuscript directory was not configured"
OUTPUT_FILE_PATH = conf.MANUSCRIPT["CONTENT_DIR"] / "50.00.supplementary_material.md"
display(OUTPUT_FILE_PATH)
assert OUTPUT_FILE_PATH.exists()
```
# Load MultiPLIER summary
```
multiplier_model_summary = pd.read_pickle(conf.MULTIPLIER["MODEL_SUMMARY_FILE"])
multiplier_model_summary.shape
multiplier_model_summary.head()
```
# LV pathways
```
lv_pathways = multiplier_model_summary[
multiplier_model_summary["LV index"].isin((LV_NAME[2:],))
& (
(multiplier_model_summary["FDR"] < 0.05)
# | (multiplier_model_summary["AUC"] >= 0.75)
)
]
lv_pathways.shape
lv_pathways = lv_pathways[["pathway", "AUC", "FDR"]].sort_values("FDR")
lv_pathways = lv_pathways.assign(AUC=lv_pathways["AUC"].apply(lambda x: f"{x:.2f}"))
lv_pathways = lv_pathways.assign(FDR=lv_pathways["FDR"].apply(lambda x: f"{x:.2e}"))
lv_pathways = lv_pathways.rename(
columns={
"pathway": "Pathway",
}
)
lv_pathways.head()
```
## Split names
```
lv_pathways["Pathway"] = lv_pathways["Pathway"].apply(lambda x: " ".join(x.split("_")))
lv_pathways.head()
```
## Fill empty
```
if lv_pathways.shape[0] == 0:
lv_pathways.loc[0, "Pathway"] = "No pathways significantly enriched"
lv_pathways = lv_pathways.fillna("")
```
## Save
```
# result_set is either phenomexcan or emerge
LV_FILE_MARK_TEMPLATE = "<!-- {lv}:multiplier_pathways:{position} -->"
TABLE_CAPTION = (
"Table: Pathways aligned to {lv_name} from the MultiPLIER models. {table_id}"
)
TABLE_CAPTION_ID = "#tbl:sup:multiplier_pathways:{lv_name_lower_case}"
# start
lv_file_mark_start = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position="start")
display(lv_file_mark_start)
# end
lv_file_mark_end = LV_FILE_MARK_TEMPLATE.format(lv=LV_NAME, position="end")
display(lv_file_mark_end)
new_content = lv_pathways.to_markdown(index=False, disable_numparse=True)
# add table caption
table_caption = TABLE_CAPTION.format(
lv_name=LV_NAME,
table_id="{" + TABLE_CAPTION_ID.format(lv_name_lower_case=LV_NAME.lower()) + "}",
)
display(table_caption)
new_content += "\n\n" + table_caption
full_new_content = (
lv_file_mark_start + "\n" + new_content.strip() + "\n" + lv_file_mark_end
)
with open(OUTPUT_FILE_PATH, "r", encoding="utf8") as f:
file_content = f.read()
new_file_content = re.sub(
lv_file_mark_start + ".*?" + lv_file_mark_end,
full_new_content,
file_content,
flags=re.DOTALL,
)
with open(OUTPUT_FILE_PATH, "w", encoding="utf8") as f:
f.write(new_file_content) # .replace("\beta", r"\beta"))
```
# Testing Individual components of the FV HE scheme
```
import random
from syft.frameworks.torch.he.fv.modulus import CoeffModulus
from syft.frameworks.torch.he.fv.encryption_params import EncryptionParams
from syft.frameworks.torch.he.fv.context import Context
from syft.frameworks.torch.he.fv.integer_encoder import IntegerEncoder
from syft.frameworks.torch.he.fv.key_generator import KeyGenerator
from syft.frameworks.torch.he.fv.encryptor import Encryptor
from syft.frameworks.torch.he.fv.decryptor import Decryptor
from syft.frameworks.torch.he.fv.modulus import SeqLevelType
from syft.frameworks.torch.he.fv.evaluator import Evaluator
```
## Key generation
```
poly_modulus = 64
bit_sizes= [40]
plain_modulus = 64
ctx = Context(EncryptionParams(poly_modulus, CoeffModulus().create(poly_modulus, bit_sizes), plain_modulus))
keygenerator = KeyGenerator(ctx)
sk, pk = keygenerator.keygen()
print(ctx.param.coeff_modulus)
# print(len(sk.data))
print('secret key values : ', sk.data)
# print(pk.data)
# print('public key values : ', pk.data)
```
## Integer Encoder
Encodes integer values into Plaintext objects.
```
int_encoder = IntegerEncoder(ctx)
ri1 = random.randint(0,10)
ri2 = random.randint(0,10)
ri3 = random.randint(0,10)
pt1 = int_encoder.encode(ri1)
pt2 = int_encoder.encode(ri2)
pt3 = int_encoder.encode(ri3)
print(pt1.data," ", pt2.data, " ", pt3.data)
# print('plaintext data',plaintext.data)
```
### Decodes back to Integer
```
print(int_encoder.decode(pt1))
print(int_encoder.decode(pt2))
print(int_encoder.decode(pt3))
```
## Encryptor
Encrypts a Plaintext into a Ciphertext using the public key.
```
encrypter = Encryptor(ctx, pk)
ct1 = encrypter.encrypt(pt1)
ct2 = encrypter.encrypt(pt2)
ct3 = encrypter.encrypt(pt3)
```
(Encrypting a Plaintext directly with the secret key, i.e. symmetric encryption, is not demonstrated here.)
## Decryptor
Decrypts a Ciphertext back into a Plaintext using the secret key.
```
decrypter = Decryptor(ctx, sk)
dec1 = decrypter.decrypt(ct1)
dec2 = decrypter.decrypt(ct2)
dec3 = decrypter.decrypt(ct3)
print(int_encoder.decode(dec1), " ", int_encoder.decode(dec2), " ", int_encoder.decode(dec3))
```
## Evaluator
```
eval = Evaluator(ctx)
cc12 = eval.add(ct1, ct2)
cc12 = decrypter.decrypt(cc12)
print(int_encoder.decode(cc12))
pc12 = eval.add(pt1, ct2)
pc12 = decrypter.decrypt(pc12)
print(int_encoder.decode(pc12))
pp12 = eval.add(pt1, pt2)
print(int_encoder.decode(pp12))
```
### Verify result
```
assert int_encoder.decode(cc12) == int_encoder.decode(pc12) == int_encoder.decode(pp12) == ri1+ri2
result = eval._mul_cipher_cipher(ct1, ct2)
print("\n\nct1 :",ct1.data)
print("\n\nct2 :",ct2.data)
print('\n\n')
result = decrypter.decrypt(result)
result = int_encoder.decode(result)
print('final result: ', result)
print(ri1 * ri2, " ", result)
assert ri1 * ri2 == result
```
## Try Relinearization operation
```
poly_modulus = 64
bit_sizes = [40, 40]
plain_modulus = 64
ctx = Context(EncryptionParams(poly_modulus, CoeffModulus().create(poly_modulus, bit_sizes), plain_modulus))
keygenerator = KeyGenerator(ctx)
sk, pk = keygenerator.keygen()
relin_keys = keygenerator.get_relin_keys()
# relin_keys
int_encoder = IntegerEncoder(ctx)
a = int_encoder.encode(10)
b = int_encoder.encode(19)
c = int_encoder.encode(26)
encrypter = Encryptor(ctx, pk)
decrypter = Decryptor(ctx, sk)
eval = Evaluator(ctx)
relin_prod_ab = eval.relin(eval.mul(encrypter.encrypt(a), encrypter.encrypt(b)), relin_keys)
print(int_encoder.decode(decrypter.decrypt(relin_prod_ab)))
assert len(relin_prod_ab.data) == 2
relin_prod_abc = eval.relin(eval.mul(relin_prod_ab, encrypter.encrypt(c)), relin_keys)
print(int_encoder.decode(decrypter.decrypt(relin_prod_abc)))
assert len(relin_prod_abc.data) == 2
final = eval.relin(eval.mul(relin_prod_ab, relin_prod_abc), relin_keys)
print(int_encoder.decode(decrypter.decrypt(final)))
assert len(final.data) == 2
```
# Tutorial 2: Entanglement Forged VQE for the $H_2O$ molecule
In this tutorial, we apply Entanglement Forged VQE to a $H_2O$ molecule. We follow the same format as Tutorial 1 for the $H_2$ molecule, but we also simplify the problem by freezing (removing) some orbitals.
**What's new here?**
- Freezing orbitals by specifying the `orbitals_to_reduce` parameter.
- Defining the ansatz using parametrized gates.
- Specifying `spsa_c0` in `EntanglementForgedConfig` to help with convergence.
For comparison, you may wish to review how the ansatz was defined as a `TwoLocal` object, for the case of the $H_2$ molecule, in Tutorial 1.
### Importing the relevant modules
```
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from matplotlib import pyplot as plt
from qiskit_nature.drivers import Molecule
from qiskit_nature.drivers.second_quantization import PySCFDriver
from qiskit_nature.problems.second_quantization import ElectronicStructureProblem
from qiskit_nature.mappers.second_quantization import JordanWignerMapper
from qiskit_nature.converters.second_quantization import QubitConverter
from qiskit import Aer
import sys
sys.path.append('../../')
from entanglement_forging import EntanglementForgedGroundStateSolver
from entanglement_forging import EntanglementForgedConfig
```
## Setting up the problem (the chemistry)
We start by setting up the chemical problem. Here, we are considering an H2O molecule. We will put the O atom at the origin and specify the positions of the two H atoms by giving their distance from the origin (radius) and the angle between them.
```
radius_1 = 0.958 # position for the first H atom
radius_2 = 0.958 # position for the second H atom
thetas_in_deg = 104.478 # bond angles.
H1_x = radius_1
H2_x = radius_2*np.cos(np.pi/180 * thetas_in_deg)
H2_y = radius_2*np.sin(np.pi/180 * thetas_in_deg)
molecule = Molecule(geometry=[['O', [0., 0., 0.]],
['H', [H1_x, 0., 0.]],
['H', [H2_x, H2_y, 0.0]]], charge=0, multiplicity=1)
driver = PySCFDriver.from_molecule(molecule = molecule, basis='sto6g')
problem = ElectronicStructureProblem(driver)
converter = QubitConverter(JordanWignerMapper())
```
### Classical Result
For comparison, we also compute the classical result.
```
from qiskit_nature.algorithms.ground_state_solvers import GroundStateEigensolver, NumPyMinimumEigensolverFactory
solver = GroundStateEigensolver(converter, NumPyMinimumEigensolverFactory(use_default_filter_criterion=False))
result = solver.solve(problem)
print('Classical energy = ', result.total_energies[0])
```
## Simplifying the problem (freezing orbitals)
Freezing some orbitals can reduce the execution time (for a discussion of scaling and orbital freezing, refer to the Explanatory Material in the documentation). For water, we freeze orbitals 0 and 3.
```
orbitals_to_reduce = [0,3]
```
## Preparing the bitstrings
For a discussion on picking the bitstrings, refer to the Explanatory Material.
```
from entanglement_forging import reduce_bitstrings
bitstrings = [[1,1,1,1,1,0,0],[1,0,1,1,1,0,1],[1,0,1,1,1,1,0]]
reduced_bitstrings = reduce_bitstrings(bitstrings, orbitals_to_reduce)
print(f'Bitstrings: {bitstrings}')
print(f'Bitstrings after orbital reduction: {reduced_bitstrings}')
```
## Preparing the ansatz
Here, we construct the variational form used in [arXiv:2104.10220](https://arxiv.org/abs/2104.10220). The variational form is constructed from several Hop Gates, which we define first.
```
from qiskit.circuit import Parameter, QuantumCircuit
theta = Parameter('θ')
hop_gate = QuantumCircuit(2, name="Hop gate")
hop_gate.h(0)
hop_gate.cx(1, 0)
hop_gate.cx(0, 1)
hop_gate.ry(-theta, 0)
hop_gate.ry(-theta, 1)
hop_gate.cx(0, 1)
hop_gate.h(0)
hop_gate.draw()
theta_1, theta_2, theta_3, theta_4 = Parameter('θ1'), Parameter('θ2'), Parameter('θ3'), Parameter('θ4')
ansatz = QuantumCircuit(5)
ansatz.append(hop_gate.to_gate({theta: theta_1}), [0, 1])
ansatz.append(hop_gate.to_gate({theta: theta_2}), [3, 4])
ansatz.append(hop_gate.to_gate({theta: 0}), [1, 4])
ansatz.append(hop_gate.to_gate({theta: theta_3}), [0, 2])
ansatz.append(hop_gate.to_gate({theta: theta_4}), [3, 4])
ansatz.draw('text', justify='right', fold=-1)
```
## Running the Forged VQE algorithm
### Statevector simulator
We first create an object that contains all the configuration settings, such as the backend, as well as other execution settings.
```
from entanglement_forging import Log
Log.VERBOSE = False
backend = Aer.get_backend('statevector_simulator')
config = EntanglementForgedConfig(backend = backend, maxiter = 350, spsa_c0 = 20*np.pi, initial_params=[0,0,0,0])
```
We are now ready to run the calculation and print the results.
```
calc = EntanglementForgedGroundStateSolver(converter, ansatz, reduced_bitstrings, config, orbitals_to_reduce)
res = calc.solve(problem)
res
print('Energies (from only one paramset in each iteration):')
plt.plot([e[0] for e in res.get_energies_history()])
plt.show()
print('Schmidts (from only one paramset in each iteration):')
plt.plot([s[0] for s in res.get_schmidts_history()])
plt.show()
print('Parameters (from only one paramset in each iteration):')
plt.plot([p[0] for p in res.get_parameters_history()])
plt.show()
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
```
%config IPCompleter.greedy=True
%matplotlib inline
# Import the dependencies.
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
# Import the requests library.
import requests
# Import the API key.
from config import weather_api_key
# Import the datetime module from the datetime library.
from datetime import datetime
# Starting URL for Weather Map API Call.
url = f"http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID={weather_api_key}"
print("Yes")
# Create a set of random latitude and longitude combinations.
lats = np.random.uniform(low=-90.000, high=90.000, size=2000)
lngs = np.random.uniform(low=-180.000, high=180.000, size=2000)
lat_lngs = zip(lats, lngs)
lat_lngs
# Use the citipy module to determine city based on latitude and longitude.
from citipy import citipy
# Create a list for holding the cities.
cities = []
# Identify the nearest city for each latitude and longitude combination.
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then we will add it to the cities list.
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count.
len(cities)
# Create an empty list to hold the weather data.
city_data = []
# Print the beginning of the logging.
print("Beginning Data Retrieval ")
print("-----------------------------")
# Create counters.
record_count = 1
set_count = 1
# Loop through all the cities in the list.
for i, city in enumerate(cities):
# Group cities in sets of 50 for logging purposes.
if (i % 50 == 0 and i >= 50):
set_count += 1
record_count = 1
# Create endpoint URL with each city.
city_url = url + "&q=" + city.replace(" ","+")
# Log the URL, record, and set numbers and the city.
print(f"Processing Record {record_count} of Set {set_count} | {city}")
# Add 1 to the record count.
record_count += 1
# Run an API request for each of the cities.
try:
# Parse the JSON and retrieve data.
city_weather = requests.get(city_url).json()
# Parse out the needed data.
city_lat = city_weather["coord"]["lat"]
city_lng = city_weather["coord"]["lon"]
city_max_temp = city_weather["main"]["temp_max"]
city_humidity = city_weather["main"]["humidity"]
city_clouds = city_weather["clouds"]["all"]
city_wind = city_weather["wind"]["speed"]
city_country = city_weather["sys"]["country"]
weather_description = city_weather["weather"][0]["description"]
# Convert the date to ISO standard.
city_date = datetime.utcfromtimestamp(city_weather["dt"]).strftime('%Y-%m-%d %H:%M:%S')
# Append the city information into city_data list.
city_data.append({"City": city.title(),
"Lat": city_lat,
"Lng": city_lng,
"Max Temp": city_max_temp,
"Humidity": city_humidity,
"Cloudiness": city_clouds,
"Wind Speed": city_wind,
"Current Description": weather_description,
"Country": city_country,
"Date": city_date})
# If an error is experienced, skip the city.
except:
print("City not found. Skipping...")
pass
# Indicate that Data Loading is complete.
print("-----------------------------")
print("Data Retrieval Complete ")
print("-----------------------------")
# Convert the array of dictionaries to a Pandas DataFrame.
city_data_df = pd.DataFrame(city_data)
city_data_df.head(10)
# Create the output file (CSV).
output_data_file = "weather_data/weatherpy_challenge.csv"
# Export the City_Data into a CSV.
city_data_df.to_csv(output_data_file, index_label="City_ID")
```
```
from numpy import genfromtxt
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
sentences = pd.read_csv('../data/processed/temple_radio_1_2_sentences_with_translation.csv')
sent_embeddings = genfromtxt('../data/processed/temple_radio_1_2_sentence_embeddings.csv', delimiter=',')
pos_sent = sentences[(sentences['Translation'] == 'The liver is enlarged.')]
pos_sent_embeddings = sent_embeddings[pos_sent.index, :]
neg_sent = sentences.drop(pos_sent.index)
neg_sent_embeddings = sent_embeddings[neg_sent.index, :]
pos_sent = pos_sent.reset_index(drop=True)
neg_sent = neg_sent.reset_index(drop=True)
new_sent_df = pd.concat([pos_sent, neg_sent]).reset_index(drop=True)
y_pos = [1 for p in range(len(pos_sent_embeddings))]
y_neg = [0 for n in range(len(neg_sent_embeddings))]
pos_df = pd.DataFrame(pos_sent_embeddings)
pos_df['class'] = y_pos
neg_df = pd.DataFrame(neg_sent_embeddings)
neg_df['class'] = y_neg
pos_df = pos_df.reset_index(drop=True)
neg_df = neg_df.reset_index(drop=True)
new_df = pd.concat([pos_df, neg_df]).reset_index(drop=True)
new_df['sentence'] = new_sent_df['Sentence']
new_df['translation'] = new_sent_df['Translation']
new_df = new_df.sample(frac=1).reset_index(drop=True)
y = new_df[["class"]]
X = new_df.drop(["class"], axis = 1)
skf = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
acc_scores, f1_scores = [], []
i = 0
conf_scores = []
for train, test in skf.split(X, y): # Provides train/test indices to split data in train/test sets.
clf = LogisticRegression(random_state=0, max_iter=1000).fit(X.drop(["sentence", "translation"], axis = 1).loc[train], y.loc[train].values.ravel())
y_pred = clf.predict(X.drop(["sentence", "translation"], axis = 1).loc[test])
df_skf = pd.DataFrame(X[['sentence', 'translation']].loc[test])
df_skf['y_true'] = y.loc[test]
df_skf['pred'] = y_pred
df_skf.to_csv(f"../data/processed/classification_results/liver_is_enlarged_result_{i}.csv", index=False)
acc = accuracy_score(y.loc[test], y_pred)
f1 = f1_score(y.loc[test], y_pred)
acc_scores.append(round(acc, 4))
f1_scores.append(round(f1, 4))
conf_scores.append(confusion_matrix(y.loc[test], y_pred))
i += 1
print(f"confusion matrix score:\n{sum(conf_scores)}")
print(f"Acc scores: {acc_scores}\nMean acc: {sum(acc_scores)/len(acc_scores):.4f}\n")
print(f"F1 scores: {f1_scores}\nMean f1: {sum(f1_scores)/len(f1_scores):.4f}\n")
```
# Directed, Polar Heat Diffusion
```
import random
import sys
import time
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from itertools import product
from typing import Optional
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from IPython.display import Markdown
from sklearn.preprocessing import normalize
from tqdm import tqdm_notebook as tqdm
%matplotlib inline
mpl.rcParams['figure.figsize'] = [8.0, 3.0]
print(time.asctime())
print(sys.version)
# My favorite seed
np.random.seed(127)
random.seed(127)
def draw(graph):
edges = graph.edges()
pos = nx.spring_layout(graph)
colors = []
for (u,v,attrib_dict) in list(graph.edges.data()):
colors.append('blue' if attrib_dict['weight'] == 1 else 'red')
    nx.draw(graph, pos=pos, edgelist=edges, edge_color=colors, node_size=60)
def assign_bernoulli_polarity(graph, p:float = 0.5) -> None:
"""Bigger probability means more positive edges."""
for u, v, k in graph.edges(keys=True):
graph.edges[u, v, k]['weight'] = 1 if random.random() < p else -1
# Insulation parameters to check
alphas = (
0.1,
0.01,
0.001,
)
n_subplots_x = 3
n_subplots_y = int((1 + len(alphas)) / n_subplots_x)
n_subplots_x, n_subplots_y
```
# Definitions
Definitions:
A directed graph $G$ is defined as:
$G = (V, E)$
where the edges $E$ are a subset of pairs of vertices $V$:
$E \subseteq V \times V$
Edges $(V_i, V_j) \in E$ are weighted according to a weighting function $w$
$w: V \times V \to \{-1, 0, 1\}$
where edges with positive polarity have weight $w(V_i, V_j) = 1$, edges with negative polarity have weight $w(V_i, V_j) = -1$, and edges missing from the graph have weight $w(V_i, V_j) = 0$. More succinctly, the weights can be represented with the weight matrix $W$ defined as
$W_{i,j} = w(V_i, V_j)$
Nodes have initial heats represented as vector $h^0 \in \mathbb{R}^{|V|}$
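To ground these definitions, here is a small illustrative sketch (added for clarity, not part of the original analysis) that builds a three-node signed digraph and reads off its weight matrix $W$:
```
# Illustration of the definitions: a tiny signed digraph and its weight matrix W.
toy = nx.DiGraph()
toy.add_edge('A', 'B', weight=-1)  # w(A, B) = -1 (negative polarity)
toy.add_edge('B', 'C', weight=+1)  # w(B, C) = +1 (positive polarity)
print(nx.to_numpy_array(toy))      # absent edges contribute w = 0
```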
# Exploration of Update Strategies
## Strategy 1: Update with L1 Norm and Insulation
Heat flows through the out-edges of $V_i$ divided evenly among its neighbors. This first means that $W$ must be row-wise normalized (the "[L1-norm](https://en.wikipedia.org/w/index.php?title=Norm_(mathematics)&section=4#Absolute-value_norm)"), where each row is divided by the sum of the absolute values of its entries (the absolute values matter because weights can be $-1$). It can be redefined as:
$W_{i,j} = \frac{w(V_i, V_j)}{\sum_{k=1}^{|V|} |w(V_i, V_k)|}$
Luckily, [`sklearn.preprocessing.normalize`](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html) does the trick.
However, only a fraction $\alpha$ of the heat on a given node is allowed to flow at any given step. The remaining fraction of the heat ($1 - \alpha$) stays.
### Derivations and Musings
Heat flows through the *out-edges* of $V_i$ divided evenly among its neighbors.
$\delta_{in}^t(i) = \sum_{j=1}^{|V|} h_j^t W_{j, i} = h^t W_{., i}$
$\delta_{out}^t(i) = \sum_{j=1}^{|V|} h_i^t W_{i, j}$
$\delta^t(i) = \delta_{in}^t(i) - \delta_{out}^t(i)$
Using step size $\alpha$, the new heat at time point $t + 1$ is
$h^{t+1}_i = (1 - \alpha) h^t_i + \alpha \delta^t(i)$
Therefore
$h^{t+1} = (1 - \alpha) h^t + \alpha \delta^t$
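As a sanity check (an added illustration), here is a single hand-rolled update step in NumPy. It mirrors what the diffuser classes below actually compute, where $\delta^t$ is taken to be the in-flow $h^t W$ on the row-normalized weights:
```
# One hand-rolled update step, matching InsulatedDiffuser.run defined below.
alpha_demo = 0.1
W_demo = normalize(np.array([[0.0, -1.0, -1.0],
                             [0.0,  0.0,  1.0],
                             [0.0,  0.0,  0.0]]), norm='l1')  # row-wise L1 normalization
h_t = np.array([5.0, 2.0, 2.0])
delta_t = h_t @ W_demo                                  # in-flow to each node
h_next = (1 - alpha_demo) * h_t + alpha_demo * delta_t
print(h_next)
```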
```
class BaseDiffuser(ABC):
def __init__(self, graph: nx.DiGraph, alpha: float, steps: Optional[int] = None) -> None:
self.alpha = alpha
self.deltas = []
self.heats = []
self.steps = steps or int(30 / self.alpha)
self.weights = self.calculate_weights(graph)
@staticmethod
@abstractmethod
def calculate_weights(graph):
raise NotImplementedError
@abstractmethod
def run(self, heat, tqdm_kwargs=None) -> None:
raise NotImplementedError
def _plot_diffusion_title(self):
return f'Diffusion ($\\alpha={self.alpha}$)'
def plot(self, heat_plt_kwargs=None, deriv_plt_kwargs=None) -> None:
fig, (lax, rax) = plt.subplots(1, 2)
lax.set_title(self._plot_diffusion_title())
lax.set_ylabel('Heat')
lax.set_xlabel('Time')
pd.DataFrame(self.heats).plot.line(ax=lax, logx=True, **(heat_plt_kwargs or {}))
rax.set_title('Derivative of Sum of Absolute Heats')
rax.set_ylabel('Change in Sum of Absolute Heats')
rax.set_xlabel('Time')
derivative = [
(x2 - x1)
for x1, x2 in zip(self.deltas, self.deltas[1:])
]
pd.DataFrame(derivative).plot.line(ax=rax, logx=True, legend=False, **(deriv_plt_kwargs or {}))
plt.tight_layout(rect=[0, 0, 1, 0.95])
return fig, (lax, rax)
@staticmethod
def optimize_alpha_multirun(graph, alphas, heat):
alpha_heats = {}
alpha_deltas = {}
for alpha in alphas:
diffuser = Diffuser(graph, alpha)
diffuser.run(heat)
alpha_heats[alpha] = diffuser.heats
alpha_deltas[alpha] = diffuser.deltas
return alpha_deltas, alpha_heats
@classmethod
def optimize_alpha_multiplot(cls, graph, alphas, heat, heat_plt_kwargs=None, deriv_plt_kwargs=None):
ds, hs = cls.optimize_alpha_multirun(graph, alphas, heat)
cls._optimize_alpha_multiplot_helper(hs, plt_kwargs=heat_plt_kwargs)
cls._optimize_alpha_multiplot_deriv_helper(ds, plt_kwargs=deriv_plt_kwargs)
@staticmethod
def _optimize_alpha_multiplot_helper(hs, plt_kwargs=None):
fig, axes = plt.subplots(n_subplots_y, n_subplots_x)
for alpha, ax in zip(alphas, axes.ravel()):
ax.set_title(f'$\\alpha={alpha}$')
ax.set_ylabel('Heat')
ax.set_xlabel('Time')
pd.DataFrame(hs[alpha]).plot.line(ax=ax, logx=True, **(plt_kwargs or {}))
plt.suptitle(f'Diffusion ($\\alpha={alpha}$)')
plt.tight_layout(rect=[0, 0, 1, 0.95])
@staticmethod
def _optimize_alpha_multiplot_deriv_helper(ds, plt_kwargs=None):
fig, axes = plt.subplots(n_subplots_y, n_subplots_x)
for alpha, ax in zip(ds, axes.ravel()):
ax.set_title(f'$\\alpha={alpha}$')
ax.set_ylabel('Change in Sum of Heats')
ax.set_xlabel('Time')
derivative = [
(x2 - x1)
for x1, x2 in zip(ds[alpha], ds[alpha][1:])
]
pd.DataFrame(derivative).plot.line(ax=ax, logx=True, legend=False, **(plt_kwargs or {}))
plt.suptitle('Derivative of Sum of Absolute Heats')
plt.tight_layout(rect=[0, 0, 1, 0.95])
@classmethod
def multiplot(cls, graphs_and_heats, alpha):
for graph, init_h in graphs_and_heats:
d = cls(graph, alpha=alpha)
d.run(init_h)
fig, axes = d.plot(heat_plt_kwargs=dict(legend=False))
fig.suptitle(graph.name)
plt.show()
class InsulatedDiffuser(BaseDiffuser):
def run(self, heat, tqdm_kwargs=None) -> None:
for _ in tqdm(range(self.steps), leave=False, desc=f'alpha: {self.alpha}'):
delta = heat @ self.weights
self.deltas.append(np.sum(np.abs(delta)))
heat = (1 - self.alpha) * heat + self.alpha * delta
self.heats.append(heat)
class Diffuser(InsulatedDiffuser):
@staticmethod
def calculate_weights(graph):
adj = nx.to_numpy_array(graph)
return normalize(adj, norm='l1')
```
### Example 1
Example 1 is a small system, set up to run out of heat, defined by the short set of relations
```
A -| B
A -| C
B -> C
```
with weight matrix $W$ (indexed in alphabetical order):
$W=\begin{bmatrix}
0 & -1 & -1 \\
0 & 0 & 1 \\
0 & 0 & 0
\end{bmatrix}$
```
example_1_graph = nx.DiGraph()
example_1_graph.name = 'Example 1 - Small Decreasing Graph'
example_1_graph.add_edges_from([
('A', 'B', dict(weight=-1)),
('A', 'C', dict(weight=-1)),
('B', 'C', dict(weight=+1)),
])
plt.figure(figsize=(3, 3))
draw(example_1_graph)
plt.title(f'Visualization of ${example_1_graph}$')
plt.show()
example_1_init_h = np.array([5.0, 2.0, 2.0])
Diffuser.optimize_alpha_multiplot(example_1_graph, alphas, example_1_init_h)
```
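As a quick check (an addition, not in the original analysis), the adjacency matrix networkx produces for `example_1_graph` matches the weight matrix $W$ written above, and its row-normalized form is what `Diffuser` operates on:
```
# Verify that the constructed graph reproduces the weight matrix W above.
W_example_1 = nx.to_numpy_array(example_1_graph)
print(W_example_1)
print(normalize(W_example_1, norm='l1'))  # the row-normalized weights used by Diffuser
```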
### Example 2
Diffusion on synthetic data.
- **Architecture**: directed scale-free with:
- $n=20$
- $\alpha=0.31$
- $\beta=0.64$
- $\gamma=0.05$
- **Polarity**: bernoulli with:
- $\rho=0.5$
- **Initial Heat**: normal distribution with:
- $\mu=0$
- $\sigma=1$
```
example_2_graph = nx.scale_free_graph(n=20, alpha=.31, beta=.64, gamma=.05)
example_2_graph.name = 'Example 2 - Random Graph with Even Polarity'
assign_bernoulli_polarity(example_2_graph, p=0.5)
draw(example_2_graph)
example_2_init_h = np.random.normal(size=example_2_graph.number_of_nodes())
Diffuser.optimize_alpha_multiplot(example_2_graph, alphas, example_2_init_h, heat_plt_kwargs=dict(legend=False))
```
### Example 3
A random graph with more negative edges.
```
example_3_graph = nx.scale_free_graph(n=20, alpha=.31, beta=.64, gamma=.05)
example_3_graph.name = 'Example 3 - Random Graph with Mostly Negative Polarity'
assign_bernoulli_polarity(example_3_graph, p=0.3)
example_3_init_h = np.random.normal(size=example_3_graph.number_of_nodes())
diffuser = Diffuser(example_3_graph, alpha=0.01)
diffuser.run(example_3_init_h)
diffuser.plot(heat_plt_kwargs=dict(legend=False))
plt.show()
```
### Example 4
A random graph with mostly positive edges.
```
example_4_graph = nx.scale_free_graph(n=20, alpha=.31, beta=.64, gamma=.05)
example_4_graph.name = 'Example 4 - Random Graph with Mostly Positive Polarity'
assign_bernoulli_polarity(example_4_graph, p=0.7)
example_4_init_h = np.random.normal(size=example_4_graph.number_of_nodes())
diffuser = Diffuser(example_4_graph, alpha=0.01)
diffuser.run(example_4_init_h)
diffuser.plot(heat_plt_kwargs=dict(legend=False))
plt.show()
```
### Example 5
```
example_5_graph = nx.DiGraph()
example_5_graph.name = 'Example 5 - Small Increasing Graph'
example_5_graph.add_edges_from([
(0, 1, dict(weight=+1)),
(0, 2, dict(weight=+1)),
(1, 2, dict(weight=+1)),
])
plt.figure(figsize=(3, 3))
draw(example_5_graph)
plt.title(f'Visualization of ${example_5_graph}$')
plt.show()
example_5_init_h = np.random.normal(size=example_5_graph.number_of_nodes())
diffuser = Diffuser(example_5_graph, alpha=0.01)
diffuser.run(example_5_init_h)
diffuser.plot()
plt.show()
```
### Example 6 - Chaotic Increasing System
```
example_6_graph = nx.DiGraph()
example_6_graph.name = 'Example 6 - Small Chaotic Increasing Graph'
example_6_graph.add_edges_from([
(0, 1, dict(weight=+1)),
(1, 2, dict(weight=+1)),
(2, 0, dict(weight=+1)),
])
plt.figure(figsize=(3, 3))
draw(example_6_graph)
plt.title(f'Visualization of ${example_6_graph}$')
plt.show()
example_6_init_h = np.random.normal(size=example_6_graph.number_of_nodes())
diffuser = Diffuser(example_6_graph, alpha=0.01)
diffuser.run(example_6_init_h)
diffuser.plot()
plt.show()
```
This is the first example of a system coming to a non-zero steady state! One of the reasons is that any system with a sink will always hemorrhage heat out of that sink (the short check after the list below makes this concrete).
Some ideas on how to deal with this:
1. Scale how much heat can go into a node based on how much heat it already has (a differential equations approach)
2. Self-connect all nodes
3. Self-connect only sink nodes (ones with no out-edges)
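Before trying those strategies, a quick illustrative check (reusing `example_1_graph` and `Diffuser` from above) shows the problem directly: the sink node's row in the normalized weight matrix is all zeros, so the heat it holds is never passed on.
```
# Sink nodes have no out-edges, hence an all-zero row in the weight matrix:
# whatever heat they hold contributes nothing to the next step's delta.
sinks = [node for node, out_degree in example_1_graph.out_degree() if out_degree == 0]
print('Sink nodes in Example 1:', sinks)
print(Diffuser.calculate_weights(example_1_graph))
```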
```
example_graphs = [
(example_1_graph, example_1_init_h),
(example_2_graph, example_2_init_h),
(example_3_graph, example_3_init_h),
(example_4_graph, example_4_init_h),
(example_5_graph, example_5_init_h),
(example_6_graph, example_6_init_h),
]
```
## Strategy 2: Self-connect nodes
All nodes diffuse a bit of heat back to themselves, independent of their insulation. This means that the weight matrix is redefined to have 1's on the diagonal before the rows are normalized.
```
class SelfConnectedInsulatedDiffuser(InsulatedDiffuser):
    """Insulated diffuser whose weight matrix gets a 1.0 on every diagonal entry before normalization."""

    def _plot_diffusion_title(self):
        return f'Self-Connected Insulated Diffusion ($\\alpha={self.alpha}$)'

    @staticmethod
    def calculate_weights(graph):
        adj = nx.to_numpy_array(graph)
        for i in range(adj.shape[0]):
            adj[i, i] = 1.0
        return normalize(adj, norm='l1')


SelfConnectedInsulatedDiffuser.multiplot(example_graphs, alpha=0.01)
```
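As a purely illustrative aside (not part of the original analysis), comparing the two weight matrices for Example 1 shows exactly what the self-connections change: the former sink row is no longer all zeros.
```
# Compare plain vs. self-connected weights for the small Example 1 graph.
# With a 1.0 on its diagonal entry, the former sink node C now routes its
# heat back to itself instead of dropping it from the system.
import numpy as np

print(np.round(Diffuser.calculate_weights(example_1_graph), 2))
print(np.round(SelfConnectedInsulatedDiffuser.calculate_weights(example_1_graph), 2))
```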
## Strategy 3: Anti-self connectivity
```
class AntiSelfConnectedInsulatedDiffuser(InsulatedDiffuser):
    """Insulated diffuser whose weight matrix gets a -1.0 on every diagonal entry before normalization."""

    def _plot_diffusion_title(self):
        return f'Anti-Self-Connected Insulated Diffusion ($\\alpha={self.alpha}$)'

    @staticmethod
    def calculate_weights(graph):
        adj = nx.to_numpy_array(graph)
        for i in range(adj.shape[0]):
            adj[i, i] = -1.0
        return normalize(adj, norm='l1')


AntiSelfConnectedInsulatedDiffuser.multiplot(example_graphs, alpha=0.01)
```
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgAAAADhCAYAAAC+/w30AAAABmJLR0QA/wD/AP+gvaeTAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH4wEeDgYF/Qy0kwAAIABJREFUeNrsnXl4G9W5/z8zWrxNHDt29j0kBEgQMYQdwtqW0CKgtHS5XShtb1vfbrSltOpduro/StfbW3VvaaG0QEtApQ1Q9n1JIlDCEkL23fES2+NFsjTz++MckUHYia0ZW5J9vs+jR7JkHc3MOXPe77uDgoKCgoKCgoKCgoKCgoKCgkIBYSbCA75WUFBQUFBQGAfCX5EABQUFBQWvoalLUNQkwAD+AuwGvmCEYt3qqigoKCgoeAFdXYKixqnA24ELgXnqcigoKCgoKAIwPjBZPvcDvepyKCgoKCgoAjA+UCuf04Ay/ysoKCgoKAIwTlDjIAA96nIoKCgoKCgCMD5QrQiAgoKCgoIiAOPYAmCEYpliOjBVo0BBQUFBEQCF0bEAFJXwN0IxzETYr0iAgoKCgiIACt6jUj73FpOQlcL/XKDfTIR/aoRiGKGYmi0FBQUFRQAUPLYApLKCt4jwHfm8TE2TgoKCgiIAYwJFZM6uks9dxXRdzET4ZOAMIAncUWTXTEFBQUFhCPCrSwBmIqwBc4H5wFojFOvM+rkLjAr5nCyG6+S4Hl+Tz/uA3+R8pqCgoKCgLAAlgzLgv4E7gXcXifAHEQNgA31FpP0vBc6Sb99ihGJdSvtXUFBQUASgVGEBBxA+9w8BM4rkuCrksRVTGeBPICoU9gPfU0tHQUFBQRGAkoURiqWA24AMsAI4tkgOrUw+20VwjTAT4fnA2XLd/N4IxQ5mP1NQUFBQUASgVLEN4QIAuMpMhCuKwLTtkxaAziK5Rhc7yNF31JJRUFBQUARgLFgBWoG75J8fAKYUgWZbXegDcPj+a4G3AkHgLiMU26F8/woKCgqKAJQ0HILsSeA5+foTOZ8V4nhs+ShYGWAHCToROFe+/nERBUoqKCgoKCgC4E7IGaHYZuBh+XajmQhXFkLI5fymTYEbAZmJcDnwNoRF4hkgroS/goKCgiIAY80KsArYDUwEriqUFaDIUA98UL7+DUVSmEhBQUFBQREAL60ATwEJ+fZXBtDIxxsx0hG+/2nAZuAZIxSz1IpRUFBQUARgLOL3CLP7DDMRvnwcC38Q1SI/Ld9aDbzi+ExBQUFBQRGAMWUNuB1R5tYHfHa8Cn9p+TgeaADagQeMUKxfBQAqKCgoKAIwVrVegP+TzyEzET5lHGu9X5XPLwCPSoKkFoqCgoKCIgBjTvvPvvwVItitFvjweBN8svLfVOCdiLK/DxuhWJsy/SsoKCgoAjCmrQBGKNaNiHjXgLPMRHjBeLECOM7x0/L8W4Bb1MpQUFBQUARgvFgBvi+flwArx4sVwHGOH0HUIXjWCMU2jZfzV1BQUFAEQGE/8BdEMOBbzUR48njR/s1E+N1AHZBGWEIKbv1Q7gcFBQUFRQBGCxngp/L1hYiI+DEt6Bwa/oeBcqDZCMXuLqT2n70eMi6hykyEa8xEWFPLU0FBQUERgJEUhi8DjwGVwEpZFne0hHB2jiaMsvZ/nIPs/LxYtG8zEa4D7kCkJB6jVqiCgoKCIgAjiQ7gJvn6Q8DUUfztdIG0/0sRlf8Afl1I7d+h+ZcD30RUJQSYpJamgoKCQokTgFztslh8vTIbwAKeAF4FpkgrgDZKx9iDiMIPjuI5TwDOlr95txGKNRdyPsxEOLsePgV8Ur79LeBpFROgoKCgUMIEIFtVzkyE/WYiPNnxdzFdp1cQZXABvjTKv60BZaNIwpYDy+TrHxRS+3dUHFwJfFeu1xuB7xuhWEbdvgoKCgolTACksA8CX0CU3/13MxHWiiHdzNEgyALuQ2QFHAVcNErHd1ASgMBonStwGjAd2A6sLQJiOB+4TZKgx4GvG6FYZyGJiYKCgoIiAN7BB8yUx/NL4OJslHcRWQLuB9bL15FR+k1bEoCKURK6MxHZDgA3A30FFv7lwEOAIQnJV41QbLsy/SsoKCiMAQIgN/te4CfAs/LtvwMrHBaCYjjGlDyuHkRlwKWjQFCyFoDRigGYB5wnX99phGL9BRT+GnAXMBdRkvk7Rij2uGpEpKCgoDCGLAByU9+CCPJaL4XePWYifGExkACHwPkTsFe+/lrOZyOBlHyuHOnrbybCAeACee0fAXYVUPgDfA94i/zo/4xQ7NdK81dQUFAYQwQgK0Dl5h8HPgFsRRSh+YuZCIeLgQTI42sF/oEwzV9kJsKzRtgK0CGfR8MFUAG8X75eDTQXQvhLXI2I+teA3xuhWGQAMqagoKCgUOoWAOemboRiTwHvQwTc1QE/KxYSIPF9wEL4pf9jhH8rawGYMArX/zhgMdAKPGeEYtZoXmsHETwLkeZXBfwLERyKMv0rKCgojEECkCvcjVDsGYQv2gRmAVEzEb64kBqgIyNgJxAD/IiaADUjeExx+dw5kpq3xAfl8zpgQyGusZkIT0dUHpwBbAY+aYRiB5XwV1BQUBjDBGAAEvAycDoiAGwmcIuZCJ+bI7RGW0BlX35HPs8HLh/BY/oGsBR4z0gTGwcBWGOEYs2jSbYcMQi/kedrAh+TcSFK+CsoKCiMdQLg3Oyl1rcBOAvYDUxEBAaei/ANjzoRyBIUIxRbi8iRr0akLAZGyD1hG6HYi0Yo1j2S52omwm9BuBn2AY+O1rXN+Y1vAxcD/Yh0v4cLSfYUFBQUFAEoMBEwQrEEcCWwCVEM5kHgnYWqGOjQRr8rn0+Xj5H8rZHWgrPa/25EsZ0R17pzzPqfBL6MCK78DfCLAf5HQUFBQWE8EACn9meEYk8iGvEkpPZ/O/A5MxH2FYIEyN+7F1GcZiZwoZkI+0tYWCWANcDNRihmjvT1dAp2MxF+N/C/8qP7gK8ZoVi6lIR/Q2PTqP/eaP9mqWE0r4+aC4VSRdH3VM8RFkuA3wGnIEzFPwb+xwjFekdTYEgB6UOkq/0UuB5oMkKxZJ4asJfadD7f9wELgG1GKNY/ktfSQeowE+HzgT9KEvUyosTyjmIX/g2NTcSjkYHeDwJzEEGMUxEdC2sR7pUqROBopeO+s4EkkEEUmOqVz13y0SEfrYiiUC3xaKQv3+Mby8J+sPNtaGyaLdfXFGCyYz4q5cPPGwtt9SG6cKbkXHQjYlJMRDBum5yTNqA1Ho2Yaj4UFAEYHUsAZiI8DdEQ5m1y47wF+IwRinWMtuAwE+EKRKW6ZqBtsN8+3HGZiXA1ItMhd4MK5sxPSgqFTikQ9gG7jFCszQ0pyP2/kb6GDtfNUkS54RPkeZ1ihGKvFKvwH2gTb2hsWoLonbAckUZZj6inUI5wVwURfRwCHNnaZsv1nJECKC1Jbr+c++xzUs7/HmmB2glsk4/N8WjEGg+CaJD5mIGIGco2tJqeMx9lci788nEkZOcikzMf2bnIPjoQhbN2ysd2OR9b4tFIz+GsBuOZGJT6emxobNLi0YhdytdRK5WLnUMCKoC
fAR+RHz8IvMcIxVoKKUBy4xJyj8NMhE9AxAucJDeoBVJI6HIuss/aYYRE9mHJ5xSwA2HGX4cw5T9thGLpkbI4eHCdpgJ/kCQO4DSZ+ll0fn/nzdXQ2OQHLkJkZbwVUQvCL61BvlE+NGuAhy3JwUZpUdkAvAisj0cjXWNhE8493obGptPlfKwEZst7yDdEAT/S82FJK8Im4CX52ABsiEcj+wotvIAocJkkNJq0QCU9/BlbzkW1fI1jn/t5PBr5dqmSgOxxNzQ2XYWIXzLkR2mp0OgeX8cq3tgUTkdUpW2MRyPPjWkLwCBCJIBIk/uqfGuz3JS3OKL1R1XwO/7OCoRZwNuBS4FzRnlT2oDwqf9dEoNeIG2EYnYhSIGDHBmINsP/Lj96H3ArIuOh2AR+thPjYuDzwHsZ4bLMI4iDkiTG5WMtsEVqt1lhlZGbWtFsyk5NWb72IdwsH0FUi6wv0flIIcqerwOel8T9ZSmMrey8xKMReyTmo6GxSZeKw8wCXoPaeDRysIQtABWIJnFnFPAwPhyPRv44rghAjqb9BeC/EWmC3cA7gQeMUCwzmCY+goK/DuH/fYcUbMcXySXrAe6WgnYt0GKEYt0FmK8goofCf8uP/gv4f7nWiiLRLOsRDamuQZiVxyo2SELwPPCYG21ihMlYLcLN8jng3WN4Pl4DXpBz8jjwbDwa6R2Ba6sj3BSzC3iuH41HI78rVStUQ2PTmQg35rwCHs6H4tHITfl+2V+Kd0iOhv9DMxHeh8gjnw/cA3zVTIR/nu0bP1KariM48VjJAi+XGn+xoRKRSnklIl7hDjMR/hew1gjFto+kRSBnzC86hP8vgR8VQ8R/jqCpkQTuUwVm9qOFpfLxQXn+1YO5Cwqk9ZchrGcflet3rGOhfFwh/74IkXE0FnEFIqi7pOAgKw0FtqC4hl6qB57ja78F+ACii50G/D/gF2YivMBrC4AzRc5MhE8yE+HvSc36N0Uq/HMxBZF3fxuiuuI3zUT45BxC47nwNxPhrwHZfKk7EOl+3UUm/LNZCTeOE+GfizSHfJkFnQ8p/JciUkTvGCfCfyCUjeFzO6WhsamsRGMAAogA5oAiAAUmAVKIPClJwE/lx+8D7syWD3Yr2HKCEGeaifAvgL8B11I8pv7hwCeF3H8Bt5qJ8K1mIrzCKyKQI/y/AmTv8oeBTxihWGuRCf/vIsx5lzD6AX3FAhsRK1Bw7aqhsenfgL8iYkWqxul8dCNid8YqqoALS/TY55Tovj92CECusDJCsV3AV4CPI4Jojgf+aSbC1+ZaDYYjyHKEWQQRuPMxRArgWMB8qWHFzER4tZkIh/K9XgMI/88C/4lwQ8SBy7LZGsWgaTY0Nk1qaGx6EOGemM44x0j4m4c6H47X35VWmMXjfDq8jsovNgQR5b9LppiS4zjnSgtASUMbqytLFpn5nUNI3we8ywjFunI1+iGOtwxh5j9pnGw+NyECLFtySNZQr5cGfBj4FcJMthFYnq00WCjNP0frXyS1zBAKAN3xaMQYzaCsnPnQEe60d6mpAERtgXfHo5GnR+C6F0MQIMCaeDRycilNirx2n0EUois0XAUB6mPtjnFo7A8C5yLSNPoRKYIbzUT4wmwJ4SONIV9Xm4nwfwBPjSPhDyIobB/wBTMRnuC0CBxJe5fC/63A76XwfwFYUWjhLzXc7E18ghL+b5465zUa5U3VAP6lhP8b0IdIFxzLmNLQ2HRyKVkBEBln546Fiz/mCIARijmzBLYZodhbgO8jiiZMR2QJfMNMhGsGEkRZASXHOBbRn/7/ENXExht8iJz9+81E+Pxs18MhuAZ0YBHCp/wE8M7RbjE8mLYpn48Ffq2E/5vQXghrjEzx+ytwvpqCcUcAJjNCzdRGmACsUASgyImAw8wfQdTtf0AKta8NNIE5vutLEZHy71f7EKcAdwLfkqWYDyvIjVAsgwio+xBwlRGKbcle30IKfylsJktSd7Ka1jehpwDzYQA/4VBVSIVD6EfEMo1lVAAnQmmURZbFwY5F9PlQBKCESMA9CLP2f0pt49XDCP8vIvz9S9Ue9DomANcBq7LZAodzBxih2EEjFLvZCMVeyw2kLAQcm8uvEHnlCm9G5ygLfx1RaEmR7PFrAQA4pqGxaUGJHKuP0kj3VgQgV1M1QrG9wA2ICP5Xc4W/mQjrZiL8beBblG6J0ZHGacBdZiL81SG6A153qRRS+5fP1yNqnysMjFGpDukgY2+VpNKnLv2AyDaFGutYLB9FHQfgOLYxs4fo4+VOclgCUkYo1mGEYlZObn8Zoprg1xBmKYXBUYNwB9yRjQsodFrfELTNcxCRuwqDo3cU52Ua8D3Gb47/UJBEuAHGw36yJIccFh3ksS2ixKv/OeEfL3fSEaL+yxF54F8t1PHZaNi2Lp812TpLG+C/AM1GE/+Jrllo2NlPRhM+ROnjx8xE+L1GKLYtS7SKpZufQ/iXISrKVRx5HsCv2ZQFLGxbo7dfz39KD2lwGcejFRFsZyJ87lkfb0AKQ00eZ6V8rpbXOtslUj/Ms+6S1JujNC8+4NOMfiGV3PlII9weLdL60cGhrnXZGzDbha1azkcVh6olDnVONBfHO9ZjALI4raGxqS4ejbQWsyKB6DUzZjBuCMBAFgGpuQaALyHM/iMKDRtNswGNZKaMZKacpBWkN1PJwVQtHakazLRBb6ZSfubHtg/tHQE9Q7kvSbmvB8PfTXWgg9qydir9XQT1FGV6knJfH7qWfp1MjAJOBe42E+FPGKHYE6PdiXEIjB1EtcZjDve/GVvDr9vMmJDkjDkdfODEPaTSPq66fQk9QycBFqLXQgui095aRMe3V4Gtg/WGH+IGpCHiMGrko1Y+6uRjsnxMRQQoleU8yh3Pg5ncR1TbdGyixyIsbaOBduAAsBNRwOsF4BVgSzwaaXd5PlWSGEx0zMck+ZiCcCPWyzmpQhS+GWg+gkcgLOOCAMj1W5QEwLGXjF8CMNDGXmz92/M4j+tGWvhrmoVPy9CXruJAcjKtyTqa+6ZyoG8Kbck6uvoNNKnJa1K7ZxDx/brubx96LvelqC1ro66slcllB5hU1kJtsI3asnaCepKM7cOyR9TbswS42UyEP2+EYncVw7pwaP/HIMpCBwdU0TMauqZx3BSTU2Z38LajWzhuWie2BXu7yqnw23T3H1GFywDPAI8CDwJPxaMR8whCcFjnEY9GbKmtdiLauB7pe35JDOod5CD7mCKFVY1DeNUh+tWPxib601FYAusRtTseQHQ53OvlfMjz6ZaWg71D/O5Eef3rcuZiinzPSe4mIwoBjeVSwE7MBBY0NDa9UqxugIbGpjpkxsJYwbBVRDMRPgrRoepZIxR7rtROeIAa9U2MUEVEDYugL01bsp5NnYvY2TOblr7JHEzVkLE1fJrlEPj5w0kILFunwpd8nRDMqNjNPGMrdWUHyNg+MvaIxlu1AF80QrE/ymJAdoFJgIbodfCNXMGfyujowPKZXVy48AAnzupk8WQTXbfY1V7F6o2TeWTrJF5qrjzSzzyKyBh5KB6N7MpXsIyAlj1UgV
TrEDrr49FIywgTskuAkVwUryIyPe6LRyPr3Qj7AsxHpWMuJgHN8Whk4wgdV7FUAnTih0AkHo0UZfnjhsamj8p7vZjgqhJgPgTgdkS71GapidwP3GKEYptKwSrgMP1/VC646pH4nYDeT1d/NevaTmFL13w6UhNJWsHXffYjCRuwpcZf5ktR5e9mWsU+jq9Zz/wJr5G2RpQIdALfMEKxHxaKBDiEzSJE/YLjsp+lMsJte9acDt55/D6OmdzFlAlJyvwWbd3l3PL8DB7aUsuujjKSaR2fPuhcWYigwjvi0ci+Qgv+QgisPMd/gZErwPQb4IZ4NPJqqcyHs/XxaM5JkRKAVxA1R7qKac4c+8m9iMyVcU0APoso3JFFv3y8hKi9f5MRipnFSAYcwv8c4A+MQDOfbEDehoPLeLblFMz+qkIE6L3JQqBj4dfT1JW1cebkJ5hrbMYauTiBHuALRij2y0LNv9zgrgJ+C5C2RJDlyqNbee8Ju1lQ103AZ1HmtzjYW8ZN62Zy10uT6e7XSVvaka7KDkQqUCIejWTGguAfpQ30EkStf6+zbNLAZ4GfqzkoaQIAsCgejbxWhNcriCiNXjuWCMCwHMNyM/9fRITy1cDjUuEsB5YDUaDDTIRXmYnwuTK1Ts8do1CQwn8OwiQ8Ip38DqYmcdv2f+PePW+lq98ouPB3kpJ+K8C+3qn8dccV3LHjStpT9SN1fJXA92R2wKjOe/Z3LJuJwFWaBkbQ4n2hZlZ/ZA3ffNvLLJ7Shd9n0WKW8eNHj+LtN57EH9ZNozPpI3Nk4b8euDAejcRxRGgrwTM4HNfmE3hfUrsfuCwejfzcqVErjCq+DZyNN7Ed4WKaR8dxXOTB2n0GkTl1DaNUdMtTAuCI8O41QrHfG6HY2VKQXgc8B+yXm+JlwEMIf1zETISPMRPh2uwY2Y16tMmATPf7HB5XgtOw6beCxNtO5vebP8ru7un4teIN3tWw2dy1gD9svoo1LafTk64aCbdENXCDmQif51w7IyX0c7s7njP/4DGL6nrP/szpu7jzQ2v5ynmvUl/VR0dvgE0tBj94ZCHv+lMDv183nVQG9KHxoI3AVfFoZJPS+Ie3gTY0NoUQaX9eMs408K54NPIPR7Ckuuijj1XxaORx4HrgZZdjFVWUvWM9XcTg2RpDQTvw3Xg0cieiTHpPMZzfsNMAczU6IxTbh2i2830zEV4uGdwKRNrVHESE/beAO81EeJUkChuNUMxyWBVGXCuUvxFGtLj1jkFpNi19U3j8wAo2dR6FX0ujlUCTZZ+WwQYe2ncOr5kLOX3y48yt2oZPS3uZMTAL+KGZCP+bEYq95PU8Oxs3yb/L5Lo7qaV766fqKvsxKpL0JYXQ33jA4MHN9Ty0pYZkWqPcL1wAQ0Qb8JV4NLJOaZl5baBvB6Z5PPwX49FITJGxgiPgEHIPItI888WZDY1N1fFopLPIzvE03FWsbAMelq9nUCQp+HkfhHMzd2zEa4A1ZiJcBZwlzULnAWdIq8BlwIvAg2Yi/C/gXiMUS40kEXD4/echuvp5rEkv4vHmsznQV1/UWv/Axw9lviR7e6bwj12XsKz2eU6sew7D3+VlkOAySQLeY4RiHV7M8wCCfxaik9wKYIWu2Ytm1/TQl/bx2ObJPLezhjW7q1m/z8AGArpFRWBYFg8b+KVk77mCTeHIVoBKuQcEPRz2j8Av1FwUFdnraWhsehb4D5dDvR34cxGt39MQqZr5wkbEC3XIv61iOTdPWIjTKiA35m7gXuBeKXiPkaadKxE540uA9wKvmInwXcAfjFCsJceygJfHhkgNmuyd5m+xvr2BJ5rPpDtdiU8r3YJdPs2i3/KzpvUkDiQnc87Uh6gra8byjgS8TVqJPu6BFce53sLApcBJwEKgSgM6+wLWfZum6I9vq2FrWwXN3UEsGwK+vO+7V4Cvy81ACZyhb5zZaxXCkYnhAbYAP4pHIyl1lYsOLwOvyfsxX7wX+HOh7zXH75+JqNOQLzKIbKSig6fVYXKtAvK9bbIT3zVSG/wPeQNPlhaCbwPrzET4Z2YiPNvLxjHZYzAT4U8Bb/GMNWlp1rWeygN7z6cnU4GuWSV/12ZjALaZc7lr5zvZ1zvL67iAj5mJ8MeGGwuQSwjNRLhBrpUtUgu8GjgBUWntqcqq1BUfv2Pp3dGnZ/HMzokc6A6gazZ+3dW5fDEejaSU8B+2Rph9uQTwstvbH+PRyPPZTVqh6AiA24JS5xeDZcfx+yfiLgDQAu4Y8wRgMKuA/LsbkXYSlexwJfC0vLCzgUZgk5kI/8lMhBfkCoB8j8FMhOuA//FOW06zpvUMHtp3DjbaiOf0F4IIHExVc9u297KrZ67X5/dzMxFedCQS4PxM/u8CMxH+tpkIb0aUc/0UMB9Rxa4d+AFwdNXxa87Qjrrnjl0d5eck0zq6ZnsRj7E2Ho2sVvt63lpUlbTQeClg7hpgk1YoDtJnAs+Dq42joqGx6a1Fsn4XAUe7HObJwSqDjlkCMJBVwPHaNkKxe4xQ7HSgAfgLohGHD9EbfLOZCN9uJsINiJQyhkMIcv7nO3jU2lfXLJ5vP4WH9q0oKq3ftm0qggFqqrzJsNKwSdsat217D5vNxV6SAD9wm5kIawORABnNHwAmmonwMjMR/rqZCL8CbEbUjl+AiJ5tk4z67UYoNskIxb5khGKbNG0PDY1NJ0pi4BW+roSNK0wGTvZwvGey2r9C0eIJREaYG7l0SZFYeI7BnTsDKd+KEqPeDjgneAsjFHveCMXeh3AP/C+wAZHb+y6p7d1iJsIXmYlwvUMjHNJvmYnwKYiqha6d2bpm8WrncTy67+yiE/66rnHBCQu46KRFnmUgaICuZfj7zkvY2LlEtj3zhAgcD3xzkLmsAD4pNYi4tNwsBg4igkdvl5/PNUKxK4xQ7J/ZdZSTr+sV9gP3KjOzK9TLe9sLtAOrikQwKAyOpxhif4TDbD9F4QYAliLKM7vBHcU6UXohf9wpAGSswBeBixF1Be5H+E4uBVYDN5qJ8FVmIlx/pOIyclwNUQnOde9mXbPY3TOHR/evoN8urgaKmqaxfOFMjpszhQXTJjFjUjWW7Z3Z3sbm/r0X8rIkAR7AB1xtJsKnDvBZLaJYzDxEV7CHJSlsBC42QrErjVDsJulOGtDKhIexHoh83dIP8CgQZLW5xXiX8tQM3KOsMUU958SjkS5E10U3987khsamhgKfSz2iNLEbPBePRg4UK2HVC30AA8QK7DRCsR8BH0ZkDayS//p24JfAHWYi/DkzEa4YiAg4Xp8stX+XAtams7+Gx/avoCNVXVQ+f9uG+VNrOWvJXPw+MZUXLjsKy/LuGDUgZQV4bP8KtpgLvbJ+zAA+bSbCRo4VoAVRp6EReDfwQSMU+5wRiv3ZCMV2HI70yVKzPilwvMJdQEYJm7wRRORPewEL0WUxpbT/ksDdQJ8b0QBcUGBrzxQP1u+fipmwFo06OwAR2AP8zUyEH0T0Hvgiwi90NiKo6CNmIvxTRAphOkf7D0jC4LrOddoKsKblFHb3zEQvslS/iUYZF598N
BXBwOvvzayr5oR503l+214CPm/4nYaN2V/FI/vPozrYQV3wgBclhN8F3Ab83VFhMgXcl/uPA6UADoJleFdqdiewXQl/VwggSoR7gYwUKgpFDMf9cjeQJCeGaxioAE4vlPCUpGMh7otX3VnM86UX2wFlYwQcRKAd0XL1MrkgHpeL6gREbv81AwwzC9ED3r0U6J7LurYTi074a8Blpx7HxMo3y7tzT5hHRcCP7aErQNcsWpM1PLzvApKWJzK2HJEaWOcgbm8Q+nnUhFgGlHl0yk9TJPW6SxFyAw0ggny9gI2oLaLM/6VBBJJyr3aDBQ2NTQsKdArlwIUux1gLHFAEwIVFQL62AcsIxZ6W/QcuQjRlaQd25QgNXZptFrk9hmSmnHv3rCw64W/ZNueGFjCrfuBOxhMqyrhg2VGkM96inH6aAAAgAElEQVS6r32axeau+cRbT/aqiVAYOD4r7HMzRoYq+B0mwiV4V21uLWCi4EYTnOYhIXstHo2YyvxfUrjd5fdnS1JfCDeAFwTg74iAdkUAvCIEUlDca4RiIeAoIxT7c45fuAwRSOZa4310//mY6cqi8vtnLIuF0+s4/ZjZhzl2jWNnT2bxrHoylrfHHtRTPN58Jnt7Z3sVD/B5t0LboRF6GXD2SjwaSSuB484Q4OFYDyvtv+SwCneBgHUIS28h5n0K7noaAPwrHo0oAjCCloGOAT6bi8tufz4tzfbuBWw4uKSo6vtbto1RHuTik45s3KgsC3DGsXOZWFnmaVYAgE9Pc+/ut5HMVHhBji4F5rmt/tjQ2FSGR/UeEClMzWr/do3jPRzrKXU5S4j5iWwAE3jM5VAnyGj80cYlLr+/AYd1WhGAESYFDivAB9yMpWGTtoI8tn9F0Zn+MxmLFUvnUz3Egj9zJk/kpIUzX88Q8AqiWuBEnjhwNgGfJwT3k4DbdsEzEZHDXmAXIg1RaZzu4GX9/7XqcpYOHPfNH91yCWQw92hY4xy/8W6XQz2MyGpSBGCUcbWbL/v1fjYcPIG25KTiEv6Wxfxpkzh65vDI8KmLZzG7fqL3C0ez2NhxNLu653rRCOmjWSLnAtMR/QC8sgC0qm3cNTxLyYxHIy+ry1mSuMvl9+cAR40WGZfpxDXAqS6HeiYejfQoAjC6VoAGKQjy1mx7M1W82HEcGbvYLo3G0rlTmVAxPHe536dz8fKjCfp9nh9RX6acta0no2uu3QDVZiK80uUYUz0kAAcQ1QcV3GlRR3k05M7R0gAVPF8HB3HvBjhLtpQeLbzd5fdfQ3QQLfo1OyYIgMN07Mps49MyvNZ1NAdTtUV1frZtU1NVxuTq/O6BWqOCC5ct9DQtEERe1t7eaWzvnu+FFeADLr8/mfxzjnPREo9GMii40aIMPCjBLbFptDRABW/XASII8G8uhzoXmDCKh+7W/L8BeLUU1uyYiQGQcOVE7reCbOuaR0+6wpPjylgWi6bXMaOu2tU4lp1h3baHWbf1ybzHWLZgGgtn1HmaFaABnalqNncu9CItMK8a/g6GPQlRPMQLXtOqNE7XmOPhWNvU5SxZ2MjyzS5wAlA/ivej206EL8Sjkc5S2D/GjAvATIQn4cLnqGsWzX1T2dc33bNmP5Zls2B6LeFTjqEimH922v7OLTy5eTXfi/2MF7ZtyHuct5+8GKM86HGvANjdM5O2ZL3bjICqQfoDDEXLANFHwIviBL2IboNK4yweArBTXc6StgK0As+6HOrC0ZBXsg2xm0pn+yihjJWxFAR4nitBZms0902lPVnrSd6/ZdvUGhXMmFTNpAkVnLVkHv3p4ROL3n6TNdtWY9tpdrc3c0MsSm+yNz9LSXmQ809Y4GmvAEGcptDcN8Wt+PUhTH35wisTYS+iNbWCO0z1cKy96nKWNDo9sAJcJgX0SAn+7MvLXQ61D3iyVBSIsUQAznBzPn2ZCnb3zMLyqOedZdksmlHHlBoRl3bSwhksmF47rOp8Pj1AfPt9tHbvxaf7qQiW8/jGNdz2ZP7lpY+eWcdR0yd5WiUwZQXY3TObVMZVPR8fcCYMPx2wobEpiHcpgElFADzBNA/H2q8uZ0lbAVKI0tpucC4wYaSEqmPcc8hflbGA52U3RGUBGA3kdP/L+3x6MpXs7p2FjvvYL9u2mTShgiVzp+LTdakpa1x04iL8Pn1I9gVd87GjdT2vNa/Drx9q9lMeKOPHq39Dd193XsdWEQxw0lEzCAzxOIYkubUMu3pm05dx5YLXgKNySwIP9bTwLgMgBXSj4BaTPRxLpWSWKBya9VYg4XK4t4zwsZ7kct32IlrXl0z8UMkTAJn+58dF+h+A2V9NW1+tFylt6LrOwul1zJn8xvz7SRMqWbFkHv3pI5EMDctO8/SWGFoOF9U0DbOvl98+cHPexzezvpq5U2o9cwXomsX+3sl0Z1wr4Qb5xXGU4V0XwBTQg4JbeFlIo11dzpLV/rMvtwNrXA737pEQro7xzsedJbEP2bGyVOKHxooLYCEuIsAtW2d370w0j4L/aqvKWbF03ps1ZV3j+PlTmVVXfVjh69f9rNt+Lz2pLgayRvl9fv78ZP5FcyZUlLFgWi0+XfNsAmw09nbPxHZXP6GS/Jo4BT0kAGnJ5BXcocbDsVRXxhK3AsSjkV5ENUc3Wsc7RkK4OsY7y+U+8lw8GukppeyhsUIA5rqZOMvW2d87zZPof13XWLF0HpVlgQE/n1hZzmmLZxPwD2yC1zUfbd172NwcP/yO2NfNA4lH8j7OOVNqqDUq8CohQNcs9vZNdxtDUQnMz9MCUOHRWupXFoCiIwAHVUrm2DAIIFwBee8PDY1NK0aIpCwA5rkc5i+lpP2PJQIwGxdtRy10DvRNQcMdAUhnLI6bPYXj5kw57P8dPaueRTPqB+TCNhYbdj9GMn14JdS2be594eG8j3VKdRUTKio8XEgWzX3T3FoAyiWZG24goB/Re94LZBBuAAV38LJym6ZSMktY6h+auzjuazq8K2tV8Mo6IXGilCNu9o07S21uxgoBmO5GAKStAAdTE135/23bpraqnLcsO3L106Dfx6mLZ2FUBN/AAXy6n/0dW9nb8RrWEciIbdts2Pkq2PmRFps0O1rXk7a86VapaxZtyVoy7paUjoweH2YgYADv+s6nEZkACu7gVVpmBoqoH7eCGyLQB6yT91i+WOmllu0YZxmilki+eCoejXSUmqVqrBCAKbjoKd/ZX4Nlu/OHJ9MZ3nriIirKhsZDZtZVc8L8aeiOKL/+TJLtresxkx1oQzClm33dbNy9Oa/jPdjdwdptD9Bm7vZsEtKWTne/68ZD1WYiPNx16fNqLds2mfiOmiERAJfdC8c6gh6N068uZenDIRjvx11Mx9SGxqbjPT62yUDI5TA3leK8+Et5UTlSxvKuAqdh09Ffg+ZC+89YFotn1jN/2vAI5LlL5/PKzgO0mb1omk67uYvtrS8NSfgD9Kb6eGXPayyeNfy4udaudsxkN89t+weXnPBpbA+ULE2zOZiqpq5sv5vSwFXARIYX+X1EC4Bti0BFW5g/sAd4T9OgptzW9vzXg/WTv/eO
qmRar3SsLV3+TlISji1GKLYpz7TF8QCv0jK7lAVgTGj/TgLQQf5ZImWIsuHrZXChK1Iivz8XUW7YDf7qpWVCEYAhwLHxuvI3dvcbrqr/WRactnjW8DvuaaI87x8feh7LSrGr/RU6+9oI+IamPPWkenlp96tcyvAb6bWZB+lLJWnu2sPm5rUsnLocy3YXA6Fh05NxbfmtRKTitA+gbesDrAGrKpjx96VFwQXtdTIiXtvYVPqhrjJNndHH5Mo0kyr7mVLVx6SKDHXyuaain+ryJMBpaUvbkUwf2aBgJsIhIxRb77GW5JOPQgm9fo82Ma8sAFYh95iGxiYd75oa5YN0PBoZEwRICtxMQ2PTE4iAOy3PdXUhcIPbder4/gLcla5+EhmoOooEwJL7oav7w8/YgKvNpjdTnjcByFgWC6bVMqUmP8E3e/JETlwwnYdffFEW/Rn6lPRn0mzZvwPbstD04VnA28x2elK9+PUgz21bzZy6JQT85a46Bop2yhWg2ZC/S+VNEf1S+M9G5OkGHcJxYs/6S7Rt7S8e+8Le6lqfbjExaFFdnmZieYpJFRlqK1MEA9KKbMujfN0CoJGxNPotjbSl0ZX0o2mky/2ZToTv2ZIP23GjZa0BQbx3ofmBq4F/QwQijubGr0nSdTVgejSeJwKwwBaA98r5CIzy7+oIU/k3gefHmBXgb8D7XBCreQ2NTbPj0YjrHhGyzfCZHmj/9ggK/wyiOFmPfLTINbEOeGhcEwDpL3bF0FNW0NV2tWhGHRMq8ucg55+wgH/EV9Pe3UxZYOiR+Zqmsb/jAJv3b2Ph9AXD+s1Ws52eZA+appFM97B2+z2cufBdpG0XLlcNUpZfat55YyBzfgD4OPBfb6LBtsb8Sd0cO7UT5w/3p/30pXUO9vnoM4Mk0xq9/T5SGZ3etE6y30dfWqc75cdM+ulM6nQlAwR99itfOW/TV3v6fb22TR/C5N+PKPKhyUcl0GWEYq967AIIAFcAKwp4S/1YajRu4VVdhu4CE4BLgIsL+Pt/GysEwIF/SkGWr7lwMqL0+60eaN3VUrFwg3s9XqPtQLN87Ae2AC8DLwGvxKORzgEsK+4JQLH7M7NBV14fo4U/r/mzbZvqynKmT3Jn9g76fZx4VB2r1qYpG4aeoaGxp30/m/ZtHT4B6DpIX3+KoD+IpulsbVnPgskNTJu4gIyVf5CuB3UFBkrpywCPIRqKZCerF7B1jb7dHeWT4nsmntmVDFT2pXV6+zV6Un560z56+3V6Un56+jXMVIDefh0zpdPbH6CnXyNjZd0FNpoGts3er37qx3cPdT2OwP1SyAwEL7Vtr7IyNA+tCfnOh13AYyioC8RrSGGVamhsegRZ2CcP1AKnALe6jQFAZB0tdXFKa4H9wzwOnUMVB01gsxTyWxEVE3fIx854NHLgMNcx17KSPwHIbmZmIlyHSIko45BvvYxDptdyxwatyb/9g2gAQxFnNiLQZ6AbLPt+ErjXCMW2FVPktW1D/YRK6ia4S3nWNI0LQ+fyx0f/xu72Zvy6b8jfa+/uYOPu11jZcMGQf68/3U+r2f6Gnb4/k2TDrkeYPGG2XJ8FU7retOEboZhlJsIPSE0ou6b6ASqmdyb/869nnr69vWJpf0avTEuTfnZpafB6OeVsXICGDZpF+cD2ryFbk1Tw3xHvawV1Hd8Ah7C6yQUBAFjS0Ng0KR6NtLkYQ8dlF1lE7f/husxagC9JId8sNf42oC0ejViHE/YDXEfX2lbuZvY7DrVl1QbYlHM36IH+Hi5ztwZZ7JaDTPzUTIS/aoRinldp08jkRfBtYGJVOUaF+3inWXUz+Nj57+c/b/0e/uDw3AAbdr5Cc8cBpkwcWh+Lrt4u2rra35CCCLC/aztbDrzA0VNPdlEfwPv9SpJTCxiQDYc+9TZT17LC3iboU7JHQaHIcZfL7y+Wj6fyMYFL7V8H3GqUT8SjkeFa7fYBv4xHI+kjafdeCvvBGNBAmnu1fEyQDwOR1lMlrQIVjke5tBCUSe2sB2Ge7UH474by6OVQ/fUyxyP7GxqiJ3jvAFqYhcvKbUG9L6/vBXw6kyZ4U01P13RWHHsapx61jHRm6CZ4n+5j/Y6X2dM29I6pXX3dtJrtaDnTn0z3sOXA83T1tQ05FTFX9pf70m7dABlyCoUcSdP2abamoaCgUEKWgCTCd54v5gHH5Csg5XcqHcpuPljPMCsbSuFuZYX/QIWDRjOV8E0uAOBSRFpEtiVqtjuaLk0dabkpF0TNGsTv6spvWulLYtvasI0AAb/OhHLvgoPnTJ7NxSdewIZdr2INUYrqms6egwdIbH+R4+cei28I7oPOnk5aOlvRcsSmrvnYc3ATu9pfYdHUk/OQ/xoVvl5cukv78yB0liQOCsWDbkQ9B7dQ3G7s4s/A21x8/+SGxqbb4tFIvu27h/rblpQxSbk3HQReAG5nmL0NRsqU75oASP8/RijWh4g2dCOQR0X4O/7ucDNuVcAcduEa27bpTraRzEzCZSfiN2Dlsgv4Z/xBnt/+Ero2tCyzskCQe55/iEtPWcnEyuoj/n9Hj0lzZ+ubXABCiNu8svdpptcsxCgbXmEjG41Kf5fbS5Bk+N348iENgxoU1L7sCdIejVOpLuWYxX0uv386UO9QVoejhQO8Z5B/6UL45duBVkSA3nr5SMSjkdZBxis5vCEMKh9BPpqBULm/5fi7WQqBYavjNhq1wXZhARgGMnaaJ167iwUz3sryhcd5do5Taiazctl5vLJnM6n00Pzwfp+fZzY/z/bmHYTmHTmg9UDHATp7TarKqwawKPg4YO5kS3Oc0OzzhqWA2bZGTaDTTRVApPA3hz2N3gUf6A2NTfpAwTgKBSEAPmUFGHuQpu8ORB57voF4y4AZiMj5YWnhDY1N2ZTbpPz+TvnYIQX+FmBLPBrZfQQSQSk3qhorhYD2SA0wL3v8hEAHfn3o+72maexue5nNzRv46zMZTpizlOPnLfHsZN5zxmX8+Yk72dG6d2jHg0bGsrntibuOSACS/Um2Nu9AO4yrwO8Lsn73o8yrD1FdUT/k4w7oGS8sAN0Mv1Z4Gu9qxvsQGS99jE94JWx7PRqnAoUxBymEU4gOem4i8U9vaGx6brCAusPpk4jaItlI/GageaBxsn760QrMG02MlWZAO3ARB+DT+qkNtmMNsZWtZVms2X4vfj3Aq3u28MnffpXP/OYr/P251bR0HHC/45VV8I4TL3yTj/5wKA8E+duae2jtOnxWTF9/ks3NO/AdpnKghkZ/Jsmabavx6UPjVJatUxM8iE9z7YrvMkKx4ZrzR4IAjFd4lWXT5dE45coCMGZJQBp4CnfWu5VAWR5d+Dri0chv4tHIffFo5Pl4NLJnsMC8eDTCWG1HPVYIwBY3GpuuWUyt2Ic1hMvh0/28tOcxzGQ7mqZjI+rqP/DSU0Ru/R4Xfvt9XHHD1Ty0/lFXJ/TBFe8ecgxA1iqRSqf59b8O35SqN9XHq3s3HzFY0Kf72dn+MvsOvjak47DRqC87gKa5spzbiGyP4cLrGIAyxi+8IlLdHo0TVAR
gTGMv8LSL718IVAxHQGcj8QcS9mNJux9PBGCjG83Fh8XMyt1HtABomkZfyiSx62F07Y0C1LZt0pkMyXSKDbs38e+//gpfv/V6unrzU4RqjBqWz1+CZQ1doAb9AW5+/A7Mw/xmV08Xm/ZuHVK2gKbprNm++k3nOpgFYGpFs6umSpLEbc/jeym8q6AXRAWeeYFOD8dSBGDs4gDw+BAUg2w5blOShtXA/0gLwLD2/qyAX3N5mHg0wprLw6y5PBxcc3m4fM3lYW3N5eOnzfdY6AWAEYqlzUR4O3BUPpuFpllMq9iD/wjma58W4Pmdfz9ikRxd09B8fn73yO34dB9fuvTTVASHXxr9bSecx2Mb1xIcRqOftGXxw7//gv++8to330W2TWL7i2TsoYXpaWi0mnvY0bqB2XVLDlsi2EZjavledHcWgB5EWczhIol3pms/yu/sBdo8HGsiIvVKYQxBauLJhsam53I+6kW4kLokkdyCqAIaB+LxaGTPIGMN6/eXr4qx5vKwBpwMfAiYAvwMeJRxUsmy5AmAIxPgCURRh7zSuKp8JtMr97KnZxq+AYSYrvlo697NttYNQ1ZZKssq+e1Dt3LcrMVccfolwz6m0xadSMbKMJzYRk3TiK27n/eddTmLZizM0dItntq0loBv6OPZ2Ly453FmTToWBmnzY6MxIdDNxGC72+nsRjS9yMdy4FXQWRDvetnng7FilWv1cKxa8rMMeQGVhTDy2AD8Rd53O4BNCKvuxng0svUw5OFNWv1wIDX9ZcDfpfAHOBuYxTipK+IfQ+fyAJC386bS38Ocqh3s6p45IAFAg5f3Pkmyf+iKpgYEA2VcH4ty/NxjOTpHIB8J86bOzas9b0+qj5/deyM//si332gdyKR59KWnCfiHN+1tPfvZ3b6RWZOOHdAKYNk60yv2UuZzbYXvMEKx1/L4Xi/e+pwLRQCsYQpOq4hJwwEPx6orEUtGxkEaFI4Ah9B+DfgcIjAvOZCwzxXybv30Uvj7gE86hH+WbI4bjBkCYIRij5qJcC95tpj06/3MqNiN4e+mzyp7gy9b13wc6NzO3o7NWFjDKpPr03Xaujv40k3f4LZrfkX5sOr857evW7bFYxuf465nV3PpKStff//+Fx6muauNiuDwLNypdA9bWl5gZu3igXc928fMyl2U+1xlzlmIQhvDLi4Vj0Z6GxqbTI+WUjneVLDLBylEy+NfcGQTZIZD9Q9mAz8E5hfRLbnfw7GmFfA8vgf8aQgky1mN0kJUiTtKifnDQ2ry/Yg0vCMRBa+hA+/Nee9xxlEjqzFBABwC41/AO/MZw7Y1plXsZVrFPraa89A02yFQM+xoe4mO3pYhBcTlIuDz8+q+bXzyV1/mxk//dMjf292ye1ipgIcsDxpmXzc/ved3zJ8yh9C8JVhWhutjUcoC5XlcG2gxd9Fi7qTemI1lH7KQ2GhUB7qYUr4Pn5YecirlIALtITfLwKPlVIbog1EIjcjmUCvQ4WyiuxElSYuJAOwpdQIghdNuYHce3908jgiA7mLNF+SApf+/YYB7PTaeCMCY8Dc6tMWb8x1D+LE7mGtsI6D3v0GYHuzZz862l/NrkOMY59nNCRp/fR396aFlrN365F34fflxNF3T2dm2l0/8+lqa7vgJ4es/THNn24Dlf49sidDo6DnA3o4tgxCnfUwubx52NcUcpCWBG5b270jjafdoOb1uAcgjt7hQ0Ci+vvG7PRxrRoEImZuvj6VKkrbDwpFGBOY9DtwAvAtRF79k4Ijyv3yAj+9TFoDSxZ0If3BeUdxp28fR1Rt56eASWpJ18i622N+5jRZzNwFfmcu7yOb+DY/z6d9+la+98/PMmTx70P99eedGfv/IbQR8+U+Rrum0d3fx24f+gl/3oeu6q2Pfe/A15tefQFVwIra8R/x6mrlV26kOdNJvuVpO24xQbHse5v/sy1ZEMGC5yzVUhvQ5j6d84BHANg/HmqUu56gixaEOrb2IKPx1wBpgTTwa2TKItaQkTm75qthgBOAFoNXxuSIApQIpOGwzEb4R+FReQs7WmVTWwoIJW2hNTsJGI9nfw6b9z+HTvblUPt3HvYnH2N6ym09e+CFOO/okptVOff3zTCbNui0JvnJLE/2W5Tr8WNM0gn73HQs1TWdf51Y6epqpCk583WpSV9bGouqNpG3XxqSbXX6/RW5W5R5M0yQlA1xrz60eWlAWqCs6IsjeK/skgW5B5Ni/jGgI92I8GhkwK2c0e9aPkBVgOpAb1PQg3rkSFQEYTTi0xl/kSwAAUpkgJ9Y9x8bOYziYmkCLuZMDXTvw+7wrDlcRLOe1/Tv48p++w+lHn8iJ849nWs0UMlaGV/du4d4XHqbV7Dhsud7RhoZGKt3H7vaNTKmei08P4NMyHFO9kdpgKynLdfXcm3LmcbhoRmQCeBHFW9fQ2FQej0bGaz8AV3AIhx3AHA+GXFRqWmYJwAa+LwXeVkSa5bZ4NNIzFgX+ABgoVuyJ5ativeNpEYwpF4CZCAO8iosOUzYaVX6Tc6Y+xKodl/PK3qfQNO+zegI+PzbwxKtreXzjc1QFK8jYFn39Kfw+X1EJ/9cXix5g84HnWTrrHHx6gEnBgyybtIZ+y7WF4WkjFNvpsrX0XrxLBZwC1EjNSGH42n/25SaPCMDEhsamysGEk0Jec2QjMhyOKOzHiMDPav5ZF8AVOR/tQBYhc/zPmIc+Bs8pCfzEzQBpy88C4zXOqLuLzS2bPTP/v1mrFkQg6A+SsjJYtk3QHxhWD4BRtQJoOt2pDvYc3ETG9nHetAcJ6Cm37X8B/ncQMvem10cgAF41oJmOcgN4gY0ejnWcupzeY7zVwpfR/1XACTkfPY8MXC208L9u5bUDvlYE4AgwQjGMUMwG1uKuwQQ2cPLkV1la7yM9CvG8pVJqzKf7eXnfOk6sizO7aisZ27V1pAO4Ozt/WYFvhGKYibAv9/3ByEA8GunwkADMBOoH2yCLEMUatbzBw7GWK3E9IpaA8XjaF/DmQPH48lWxA4U8qKywv371DVy38tqp16281pCvFQEYJhHYBdzoXtjBV86oQtdsbFttFiCK/kwr28n50+7zQviDqL3d6xTsUvhPAHaYiXCfmQj/0UyEV0qtvMJMhN/Al77642uyL7fiTfpVHTL3vEQ2SJ3h1IsePTLhJQE4o4QImUJx4yLe2PK7HUjAG1IECyX4z75u5bVbEe7HfdetvPYd16++QRGAocIhSO5F9Jp2hdBUPx9fViFKro1zEmDZUKbDNadWuin440QSuMUIxdID+P4nyxuzDPgg8E+Ej+4XwBVmIrzUTIQnAXz38z/KfuclvGsLvKShsSlYIlPjZQtjL/1dXroAzhrHGquCtziVN5Zr3o6sZVBI8/91K689DVgFzJNvVQEjykhKjgAMZAJ2moazgsQIxbYBtyByw/NG0AcfOL6cixYEPVePSgm2LdwU/95QTmiqZzLiZgZo8iJdAFuAf0e0/LwLkVdeg+jadTtwD/BzMxH+vJkIn2vb8yu2t1c8m8roScubSVpOnmWlCzE9Hi5NT/
ogSE09iajz7gVmNzQ2TS+lW0bJ2eLDmsvDJ0vlwokty1fFhtyF9LqV1w5ols/HVH/dymuzmn8d8C3e3PdiRNdRyRCAXAFvJsJnmYnw18xEeHmu9uggCb8DnnOr9daWa3zh1ApOm+kfl1aAbBmwS48O8v6l5fi9WTUdwN+MUMwcbL6NUOxJIxT7phT67wE+Kud0B8JPfyXwI+Dmvg1L71rz2SeuumBBe6AyAF1JH6mM5ma+ziohAlB0kJp6P6KAjFd4u4NcFDuCahUUleDPvjwNGd8j0Qc8k/M/R4QU2vXXrbz2E9etvDZy3cpr5+bjr5ff0eQed37Oxylg17glAANp9mYi/B4zEX5AaoHfloLhDelj0oeMEYr1AP8pNZG8kbZgqqHzjRVVHDNJxxpnJMC24ezZAT5/SiVGUPOKBN0HPDKQVUcGczrnvtMIxZ41QrHfAZ9HtH2+COEO2AfMzNjaW6ZUpa6+7rzXKn9zRYLrL3qN8xd0ENChO+UjbQ07zLIaaGhobFKtYPNHyi0Bd8AHXFZC514xXiZ5iFk6BT0Oh2n/pJy56czuQ0Mx/zs09ulAFPgxwkp5z3Urrw3m6a9fDHxhAHnchchOGDH4i3lROYR+FfAJ4DOIFK2sv7MNePRw48gugb8GPu3meDIWTKnSia6cwIfu6mKXaaGPE9GwuM7Hd86pZGK55pNQZN4AACAASURBVBX5aQH+IgnaoLn/zvez68EIxbqALjMR3oao9/Bl4HjgKuDK2orUxEmVKebU9HD+wgN0p/ys3VXDPa9O4cntE7AAXbOHOnfvBu5uaGzqH0e+Z8/obTwaSTc0NnlFADREXMaMeDSypwSuY0nHVw1Wk8NMhGs41EBnvxGKJXOzd0ZT2OcqfgAdz15anmz31VlpUcDFX24n6y+4Y/8Dc8PT7H4WaX6caVfty1fFnslDY79E7g9ZHGOjLUZ2NB0KpLWgHPgiA5e77gKeHfMEYAAtXwP8ZiI8VV6cj3PIN9mPiPb+AfBrIxRLDbTwchbGZ2QUuavuXJYNtRU6t19RzYf/3sWrbZkxTwLmVOv8cuUEaso1Mt5ZPp42QrE7hvOFAebXNhPhlBGKpYAn5ePfb7z5U9dUBjI/PGt+G5XBfiaWp3jL0fu56Jh9dKeCPPhaHf/aNJnEvirSFmQs7XAS7z1AYzwaOTiOtPZOj8drBnYiWha7Rb3ceH9ZAtex5NZMzj7qMxPhIBBCFM25AFiWS2zMRPggoonXz6QyZo8UGehcdynVJ97lJFg+MxE+GngbwmV3AjYL0Gwqp6ffdG52Bjt5QLNaHgTzVRtsrKwCOcziP/XA+52MuUrr54MVL+373jCEvyQTpwAfG+Tftl+/+ob9IznnRcFSHSZfzUyEpyB8ITfLjePzQCWiv/g9wLuNUGyBEYr9bDDhP4hZ6DJcBgSCMIeXBzRuvGQC584OoGuMOZeALcnOMZN83HhJNbUVngr/VuA7RzLZDZcUZMe66qen/PK/71+Qvuh3y/nyP5bwj1emsaXV4IBZRsCX5vKle/jFO1/gzg+t40srtnP6nE5mVCcxghlsWxOEwH7D/XE1jKv0M68p7QFEExlPtgrgHVm3jEoJ9F4BMxPhgJkILwQ+C7yIqKdyLXDiIPKiRmrCDwN/MhPh2pE4vo5nhPDf/9g7ta4XwvMRFuEEGhs0nR9oOpfbGRZkkpA2Id0F/R3iOW1CpgfsDFr5dNtXs1xD87++1d0NQ4/+l1r7AuCc1xVDNI72H7Q6reBHhyn8K4HfDvJvaYSbdETrAPgLuehyNP5lwNnAe4HTX7+2IojoUeDPRii2Jvf7R2KZjniADWYi/FngV16QgMqAxv+7wOBX63q57ZUknUnbq+C4giJLZs6dE+AbKyqF5u9tIaQ/GaHY015rCEYoJkuYfqSn4qSmNTac9sSOCTy0dSK15RkaZnRy0qwOjq7vZnZNHzOre7ly2U6uXLaLHa0Ga3dXs25PNdvaK9hxsJzWngAaNj7dvvak/2iKrv1ZpK/Ia9EXJQ2NRyMt0g1wuUdDngy8Jbs5Kni6Dy9GBNZ+CuFqHS7eB/SYiXCjtMy51xYeuRwjtEqYVJ68dLa/Kn0ZGp9FY6HmE4I92Qz9bRrJA5Bqg4wJVj9YKdADoPlALwf/BPBVQvdmG1sYCHqHs46kINYR7kaHELXtuXqHnkGvGebp/Rew8DD38z9AuB3GBAFwMk0pmMsQkb0rEYFd2YvRLU/+HuARmRI2LME/iKZ4I7BUslvXgrLCD43LK1hc5+MX63rZ1J4h6Ctdn0DGEmmPVx5bxidOrKC6zFPNH0Re+IhJUIdwvkmD04I+m6AvQzIDT2yfyMNba5lcmWZhfTeL6rtZMqWb46d1saDeZE6dyWVL9rK9vYpNLZVsbDF4cZ/B+n0Tpm3rKPsy8M1pVZminTpcBrqOBByE6QVETQcvtMOpwJUNjU2Px6ORHtUgyL3wlwrYB4FrpCI2EJKIPittUm7MYWC3Thj4M/CAW5Lf8dylTDx5Vfb15XrA/iwa52o+sNPQ8aKG+RL07oTUAVGsTdMdNizNQYtFoqxt26K1i2zv8szyVbG+oZr/pdZejYjYlzeexjxflxXUMj5bVKAdqvZ/srzeg6H5+tU3vDDSa2BUCYCDadYhUrrCiAjIeoe58JeIoi+vGKFYuxvBP8Dv95uJ8LcQhRZch65mbFEt8G1HBTmm3s+NL/Tyl5eTBH0apcYD0hZMLINrTqnk4oVByv3aSLg2rjZCse5RCBa6DeGTBEDXQPfZBHwZzJTG2t0TWLd7AhPLM9RXpZhT08fpsw9y+tyDzKs3mVdnctb8Ng50B2npDvJy88TPfKn/x7euvuHzG4tU4NiSBBQrEogiTWd6NN4H5Bzfp4S/u/3YTIQrgSZEnFXlIKT914gYmxaEG1VDBK+9DxGE6/zeZKlkPZDvPX5IUbyLPXdeETTm9H9D89kf1zTqNB+kWjWa7xOafKYHNA20AGhija0DtshjPSiPrQ4RJ7AiZ1u+PY/Du8x5vpats8DXriFi0/4+FBIh8SPeXLzLdtCXe52EYaQw4kbrnKYuc81E+OeISMlvyg2hHtE17KPAEuBbRij2VFb4DxTt6dL60CKZlyfpFbYtrAFzJ+p8+YxKfv/2Ccys0ulNl05gQL8FR0/y8dt3VHPZ4jLKfCMi/L+GrMw4ksJf+oVbsjdQLjQN/LqNT7fpSulsbSvn0a01/PjJOVx1+1KuvrWBv8Rn09UXYGZ1H0undbJy8b76E2aa/5u1MhSh77ko6WZWOMejkV14Ww+gDPhBQ2NTDQp574VS+P8R+NwAwr8HYSk9A/iJ3JM3GaHYTiMU22GEYq8aodg3GLja43QzEc67NLXDQlxXvaD/Ft3PdZpGneaHzhc1tv0Kul60sXpB09mCxrWIAO+zgE8iYoyiiDiyXwExRPpfLv4GQ0//k/jC68IfjTq915qoJXXgr8tXxfqGMsZ1K6+9Jud4OhGB7c77+I4cwlD8BCA3qMthYjrDTITvRlRz+ySHUvkeBN5ihGJHG6HY74xQ7AA5p
Vy9EhY5qSpbgA/jYZEFy4Yyn8YpMwP89V3VNDZUUObTir4cmA18/IRybn9nNYtqfa+/5zEeAqKyUdOoCB2GECmuSUIAkEzrHOzz8/zeKq5/ZC4X37icT9xxArEXZ/C39TPY1ek/r6Gx6ds5v6EwNEIGwp3X7OHQS4H/a2hs0nN+R+EIe7Sj0dZfeHNbXCRZW2yEYj9FmPzTh9nnWwbYMrR8jy37bCbCcxDZBVcAGjrsX62x62abTLcNsAuNDwDHLF8V+/7yVbEtiJS+biC1fFUss3xVzEb0D/kXb65wuWH5qljLUIr/OMz2RyNSjl8nAAv8HVpQswC+OxTt/7qV1x4FfERaULLb7XZkK2IHVo/GetC9WlQ5wSQBmcL3fjMRfgF4AuHrTyHatv4BONYIxS4wQrH7Byr4M8JmL4xQLIFI8/I8zaLCr/HF0yu49fIJXDg3QE25hkZxZAtka8YGfRoNU/3cdlk1Xzit0tNasjnYA1xjhGIHR6tgiBTQjyLbew5bndbAsm2e22XwP/cv4CdPziboswLANQ2NTR9TUeh5WQH+KbUcL/FvwHcaGpv8WcuMmpMjC3+JXyBSKt+gxwA3G6HYSUYotmsgt2vOPl8DLBpA4O8zQrF+F8e2RMqMBjTI9GrsvEmj9REbzUe3lB9Llq+K/Wn5qlh/VojnavJrLg9XI4r8THOj/TvwyTdq/31M101Nx75/+arY+sHIRE4U/yecJAIRG3MzhwLfAf51/eobrKFG/+f+32CligeC34sF5VgQExF9uy9E5DbOcZzkSwiz7G+NUGzPSGj5wyQBmhGKPWkmwlfICZjnpZDtS8PcGh//t3ICa/b089eXU6w/kGZnZ4bufhu/ro1qDYGMDZYlyhofW+/jkkVlvGNREF2DVGbEmEk3cJ0Rir1QgP2uC+G7/Hq+A/ikq8CBSkTlL39DY9Pv4tFISgWhDc0KIK/RnxHpZF52LvwKUNHQ2PTNeDTSlvN7YwFeNbdy7tOfl/uz0+dsAT8yQrEvDSCQBxqjGuHHzi1gsxPZin2osT45pOJ8hG9+Ehok92nsjUHvDpv/3955x8lR1o//vVdzZC+VkNAJhBLKQuBEqkQFZQQOgmIQpPhTkW8URf2Oo4nYkMAwwFdUIirNQhPxZFEGQZp0CASPXkInvecu5dr+/ng+wz333Ozu7N7utczn9ZrXttmZZ57P83x6SVSyDLiwoSn9a2HwNDSlezDx4Lv5MxpHy3X2yXLbO6POm2aG/7DwTyUZdq1cQ32ijQxcmCuQMPi/Y9lHoOJXdLhbrCi6hSKS+d/oIjhGcLHS9b3FUQWAoiwAIYV7dmhpbjwL+A0qr/JnwvzfQeU5ngtYyVT6wmQqvShXX/d+FAIysvAeFUvAgpIz3S5obcuQ2qaKiz8xkss+OZLzP1LHZ3arYdLICjZ1ZNjUkaGjqzzP2JmBzZ0ZNnZk2Km+gpP3qmHO4VtxxdFJPrtXLZkMZbu3EBQPuLEQYlBCzbNNNvmyEl96JCrA8NJps+buEjCaWPPMbwUQgWxJCS8dMK9vAX+YNmvuR4z7DQcoiZqgmdcP1oRi/drzRJgyGXLYtY4G/oRyo1YZAtkfKCDWx7jXmcL8xpGA1tcTfHAbbHwnQ6KCRcCXTeZvaPwB8x8pgvoxWW67BHg1qvlfXo9BgtUzwKTKVnauXAtkbgaezmdJkJx/M71yDaqM8JeM0/8VZVyu7wXMf5rQpCbgV45l7xg1dqAoC4CGsH1RKRFHAfvTHdX4rGjVDwHPB+agUkTyl8ESQDKVfqqlufEM4EpUxauSQnsXtHdlmDy2kinjqjh+YxfvrOtk4eouFixp57mlHby6spPODFRVQGWFyiIoxEKQyYiWn1ECRQbYPlnBR7ar5pDtqtlzfCW7jq2kvqaC9q5MfwQp/g64WBO0BkLjfBEVLf6NEt+iQpjO4dNmzb0W+O2CebMzuiDQz0xo0EecCk42TJs19wrRHEsNxwP7TZs19wbg8gXzZq8fYGvAoMKJ0LoK4EfA6BCG87NkKt1h7lVdyUNF0h+PaqcbVifgesDV6GohzP/bIpiMSlTCuhcSLLkzQ8c6SFSyDvhqQ1P6rhzm/oD5V6NSjc/IcWsfI64hApyKNHiqS3SyX9UKRtC5opPEbxqa0hvzCRAijJxm/PwTVHrlEdp3LwGLc0X/6785lm2JohVYOnZA1TaIVO+m2GCNKlSE6AWojmlBb+V/AL8WAWBVMpXuZAhBS3PjdsCvgJPLLdIHFQRb2zO0tmVYuznDSys6eGF5J6+t6mDhqk6Wb8yoFBc0YUA+B9kHGXk/uibBDqMqmDK2kqlbV5LapprtR1UwsjpBska5GzKZfqNKtwJnSNrlgAp802bNPUq0lR3LdIsNwMvANcB1YnmgnMzHvO60WXPHi6DziT5eugM4YMG82S+Wa8wStPd2GfERdFD7E3DVgnmzl+eauzKuu3+XSJn43IJ5s2/vq/YvTPkkWad6y9l3gJnJVHc9fKnPsisq0LJBGNSOqKp/2Tpk/hC4PJlKbypkz0sw4g+El9QkqmD9Kwk+uCVD12bJ61ea828lqC8rzJ/RmEC1EJ9Hbuv251FR+5HIoWPZI1FpkKkMCQ6v+YAdK9bRRcIFZjc0pbtyuQAcy64FXqGnm/kVYBqqv41eQfgK4Puu77VHGNfpqJL4Ew2rwlmu70VCQLECwAhUasUxIsH8EbgE1aa1M4j27q/AvlKYxzRJtBL4pSy6fk2vymSU3TwoRbuxI8OidZ0s25Bh1cYuWtqUHz8D1NfC+LoKJo5MsG19JXXVKtAwiGwfwB4FtwKnRtUC+sEKwLRZc69GBd+UE7pQcQf/RPke710wb3ZrAUy8IMY0bdbcrYRI7yIWuG/Rd9962QQAQwg4lvJHOXehcrMfFWvkXQvmzV5awJopFB9VwM4o1+dBommXop10nwUAjc5dg0q31mEV8ALKtTUa2AYVoR6Qk4o8zPQllBX42cDaF4Xmy3mVwIUiAJCohA3vJXj3mgyZbtXxL8A5DU3ptTkYf/D20yifei5YDxzW0JR+Ieq8OZZ9LHBDJ4mJ06qXM7VyBV0kngE+29CUfieX9i8m+osR94oGlut7dzuW/QI94xQ+DdyTS/sXfJwryqqJm7+5vvfZQpTRYhZSAhW1+DHgd8lUepXJSIciGILAOcBlDHBP+MACELzq9sWMZgEYJHALKjK7azAIfhrDmYxKBdqtn4fwtljDXkKl+byHyjpZLZYDcy9WipY1Wl7HowqrTJIjqL62K+XpN19WASDAiRCt2+n/9r5BX4KXUPnr7wo+Vgo+zIiYKlTg5xhUFcMAHxNQJvAdRACbjOpTUA44bcG82TeXQPvfGxWPc0Bf9RSxsrwFeNKiO7LJP6ALYmW4MhDMExWwaVmCd6+Bzo0fErTFwFkNTel7c/n85f3+qEp8ldopjwmudte++zdwRkNTOnIsimPZP8+Q+MEeVWsqDqpaTAeJtRk49+Cm9C3ZNH8jddCslXAXKu5sG3qm/60ADnN97/UQph9cb4RYW+aECLx3u753nH7/
fFBsDECG7g5sQ0bTj/BcwbMkkqn071qaGx8Xk9lBxsLqV6tAJth2gxc6UGlF302m0l0a0RnQdRGkhi2YN/utabPmXiaWnep+HMIucpw8hLZBotw4AbqmzZprowqBTejHZ5uAKjtuDSF8lGrn7wbsVeR/O8VSsBxVxO3PyVT6H2GKU0TlaiIqYHwGkKGCxOaVCT64BTo3ZPQV+J+GpvS9EO7z197vjWpGVGlYJs6ld3D348JoI8GXP31BHWzaf9fKtRUHVC2hkwRLupKvn3DHTbeEjSuE+V5v/NwG/J/rey2OZX/P+O0xUQ5Cr+VY9iQRmj5v/G8T8HvX975ZCPOHEtUBGEyBfSV6liBw7XmUX9Wj9NHkwwXWiUT63bDujEFFr5bmxnpdWOxPIUBerwZuitGVF+r66T4LAQdlpo+hjPRMYCe6i8/kgjahdS8JU70RZab/MnBUMpU+NWD+hZj7Neb/EeAOguZQCRKdrbQsuTOzdvPSHsx/bbBfs0XrS9Df3sDfxUqjW95OQ+XbJwxF5b8NTemOfBkAwe+TKjdO2btq1W4HVC+lii6WdCU7H2vb/h8Af2j8Sj7LwXn0zO9H5vNxeW+mBD6pCwAG8z8QFeVvMv9lgF0M8y/aArAlbZxkKt0K/KClufFeVBnI4+LZ+RDekjn5e5jPX76bikp1qW9pbvxxMpWeLy6kTH8JjZo/95so8/mRMeqyQnU/4SMzbdbcW1Dd/f4nnvbygZjbdwyxLNyICkbrRNXsaEH5yFej3CJLk6n0YpOZF6r0acz/dBEmJn9obqpg2ZpnuLH1dU5NVPTITliMijPLFfG/OyrNXDfxrwS+3dCU/u/8GY2zDQHgVVTZ+bzMv6EpTeYseGTtW6fWJDr3GEEHi7qSPN0+iQ2ZqlsBzkpfk1X7dyx7L1Rcjn7/D4DrXd9rldS9yYbg9YLre50hzP94VHDg7sbt3gC+7freP4ph/luUAJDNTJXPfKWlLt7f0ty4AJUGcynh1aW2JLgdVeRnYZ55nAh8CuVD3beluXF2MpXu19oAmitg3bRZc89GpcnsRgxhkOlHfGycNmvuD1ENwT4RT31ZhbrRIXi+KplKP1EI/Sxkv2quwCqUFfWr9Cx4s7miljOX+UxMVPXojJcBHhImn63Qz7Yoc/ghBhO9oKEp/ff5MxorUK5bnQG/JkfWiH39fs+sa7yyLtH+tWq6KpdkRvJ0+yQ2Zirfv/Lui1/JovEHzL8SPuxPEEAX0OT63sPy+Qx6FmJ6FdXEyGT+X5K5G2/crhkV7f9cscwf+qEZ0GDT6FuaGz/a0tz4Wktz45UtzY3JfAva8GWvFql5KqqD1pYI7WIOPCMC8yeZSj8o57eizJC/aWlunGPObT8ynTdRAThLiGHAQar3fQFlco6hfJCIQv+z7cdCGb/G/HcBnhZtWGf+G0gwPT2FexNVH7aB15nlfWHXFuY/SpiiGcvxi4am9G/k/eH0dAt0Ac/natijCRdj5s9o/A/wzUq6ahd1JXmybVtaM9UkJMsgrNKexoBPorep/j2xfgRwQoh14i1DoJgDXBfC/B8DPt5X5r9FCABaBayKlubGY1F+rd1l8YwuRHgIFlIylV6TTKXnoNJ+mii8qMRQhftRPRyuS6a6i19EsKA0oYqHLEZlVfyspbnx/1qaG+v6uTpg8PoMyv+2OOYLA8b4gQ/dActQOfML45kpm1UnjEaN6gujzyU4CPM/T7TaA0KY3V7J/dJP7HYySXqXE84g3SO1KP/gtQ5VQOd04z+3NTSlHc23/zF6ZmasAJ4yGb4eCyDM/zhUbYQjE8DirnqebN+WlsyHnrF7DGaPLhA4lr0L8L/GvdsB1/W9ZXLOUSFM/SXX94K04QrHsq8Dfh4y1X9zfe9w1/dW6dkBxcKwFQAMKXQEKiLURwXCvI2qDldwsxjDx/1uMpU+GZWHfQ/K/zTcoA1V6OYr0rxpYSGlnLXYgBdRhUUek3V3PjBPii/RX+Whg4YxC+bNvg+Vv/xqzB8GVhAQfCxBFZ15Mp6Vsuzh1SEWgam5tP5CaK1mYa0BDm5pbnwIlXVjBuHdBnwkmUq/J6y+NoQZZhqa0gtDNPNaVODot+npqvp3Q1P687rAIApHrXbOMlMACPoIzJ/RmJg/o3Hn+TMaf48qZjcK2LQyU/f8o+3bbxbNP4BHzefXTP+1qHiWQ4xTHnF97zfa5+PpmV6+HMlWcCx7G1Sq4pdCcDg3yPHvi9Y/7AUAI990gpiLrgqQAZySTKXv6asJWrvPY8lU+tMild4sUu9Qhw5UzvTPgcOSqfS1+mYvRFPQhIBVouldJ1Lx2cC1Lc2N++aqPV5GzfPfKPPzv2MeMSiEgDWoWIDrUalNMZQApBT7+yECwKdKofUHzdWkz8AlqE6cHzNOX4LKFjotmUqv174Pai2YzO5DDV2Y9AjR/H+sjR/gYeBzhpVgN7ob0QUWhYUNTenl+nnyfhKqMdJ98grKMnjR3Zt2+U57pqK2olvWaHV9b2mORjufA8zUvhWongmBsDACOIye8XfLgKckMPBe4OPGNZYDX3d9b04pmf+wFAB0BtXS3LiXMJugFvyNKN/1fLOFcZEby9wI/0qm0qeJIPBD4AFUhO1QY/z/Eg19hjRwWtPXnH5NCNgkUvJPUXEBxwI3tTQ3flrDW38KAQtQKUNzZTwxDKwQsAFVHOY7KFNsDCVQhkQAMGvWH9zS3NhQiAVOP0ejf0eLtn+baOe1xt/uBk5PptJu0G8gRBjRoV3X0oX5X0rvanpPAf+voSm91kjrS9HTrdCGcl/q1xw5f0bjl1Alo39Hd8De/cDZDU3pn1cluqYkesbEbjQ1f8eyE6L9fwpVgtiEWa7vvacJDYcawgmoglT7oQKrU8ZvbwBnu753TamZf9jED6eF3wD8FtV+FJEcr0ym0mvLVaDGFChamhu3B6agXATHAQcnKjLU1naR6KeZ7+pM0NZWEaVa4CpUydQ7geZkKr0s7JlKNUdScvlU2TSjgEXABYVUFisFGDXqpwuROWYL5RcdwMcWzJv9+EANwCjHux/Kn3rmFszDv7Bg3uxbSrDv9kDl1R9kaMaPAmcmU+m3otI1+W5rVKDbCcK8tg/7K6pi3V+SKVV5z7zW/BmNE1CpfCcYAsAODU3pZfNnNI5FNY860+BXLwNfbGhKP2vedP6Mxh/Tsw34OmBaQ1P6zfkzGmtQlscvCm8IAhM3ARcD1zY0pT/4nmVXJxQt+Jlx+ZNd32syBIHPiBBhzsFcVEnoTi2qfw49fftdqNiXEfRO1XwOFenfXA7mP+wEAI25HI6qIb2d/HQ68NewQjXlkrgNQaAGqK+szIxd+Fry9EcfHP+TFcuqSSTKm3GVySSYstdGPnnsUkbUhRoiMsLw/yimtPV6cF+55srwGR6Oqp8/GtXI4qJkKn3ZQAgB8n4Mykd3SRaiNtwFgOkL5s1+dCAHYeCjDlUrwAMO3gIFgD6VAjb23S9Q0fgmDVgoytINyVR6RY7/7ydC8vGo7q+jyF446lZUg5+3dK3f3M/zZzQmUQ1tzjHG9F9
UHYCjRWvWedX7wMkNTemn5Rp6sOBYVNtpvR7+6yiXxDdQZvqdjHE/i2rK82RDUzrIw69EWSp/ZTzXarFGXCVj+i4wC2kVrMHfgK+5vrdCixEYL8LOiaaeRm9r/L3ATNf3VpeL+Q8rAUBj/p9GBXJUoapJzQTuGYi2tOb9vv8ZuyaTYY5Ihf0CmzdVcNasD9h9z3X6AvaBv8oiawkRWMo+T4YQsDMqOHA7YUI/Bi4Na0/aj0yoBmWK/iGqZveWAG3A5AXzZi8a6IHobZXlfaVYZi6m7/XshwqsA2YsmDf7/hLtuX1RWUtTsigDCZQ/+g2hEx0imO+IKmldEYFvvIgquPVgMpXuikJP5s9o/Dqqi2y2MemwCji6oSm9wGT+8nlfET72DrmWOfY2ocVeQ1O6S7sGDU1pHMs+jJCgP3rXyjDH+BTwxZCa/gfK2KbkQdVvXd87V/tfWZj/sBAANMafENPOdfLTm6jI9QcGy1gdy64TCfAL/XXPzo4Ehxy5+r6jj1t2U1V15j/JVPqNfOa9AcLfWFQwXuCy+QmqvWhLf48xpFPf50UYOFBMdSOG2DYJ0sDatdd2VCOcl4RovwQ8smDe7OX91TK3D/g5TDSvwzQtdKjRsgAXOl5Wo7JSXpbjPwvmzX6rxPvsbNG4x5VwbbWi/NgXJ1PpP0elLVqA30dRwdOT89xrMWBJlb/QYj7zZzQehio1vHWO8W4UJfG8hibl6gy7nmPZE1A9Tgrp5fEccKbre8+bjNux7E/K2EZmI9fAD13fu6QUKX7DWgAwIlDrUL7CwF8zHzgvmUo/MZgaFQ2EAACwaVPFd698wL1isDD9PELAH1EmRoBfAD9LptKrB2LMIYLATkIMPika0QQhNJWDYBq7UB3t9CMo77oYlfr6lhwLF8yb/XaUZx5kjN/Ex1igEVXTY0+6O/XVDJIhbxQc6PhoRUXEv6PhZKHgpLOcOGlpnjQL7AAAIABJREFUbkyIJfQ0lG9+ah94QAsqjfq/wI3JVDrdF/oyf0bjj0Qbz7aXHgNOb2hKv52N+ct1PipMdmIWAeJR4PKGJlUBMUI3v4NQDeHyWZ3aUJkEs1zfeztMa3cs+xgZW5jbZBHwA9f3/tifC3RICgAG8x8rjD+I9L8bOD+ZSr862LoUigBwHSr4rV9vDVzh+l7HEMDteJS/92xZnzejGg0tHkB3QC8CPG3W3B1REbv7oCKId0C5MMrBhDajarS3yLFOPq9DxU2sRaUbrUClDC2TY+mCebNbcz0XMKi1/ajjFmFgGqrWxBSUn3dbVMnuremdatYX6BA8BALWWsHHWsHJapSpeoXgYblYW5YumDd77QDTzUAI2AUVWHe4CE87kjsrbJ0wqbdR5XQXAA8H1UD7wPgDK0AdKvPji/TsWvgWymx+SRDtn435y/W2QbkTTtG+XoBKTWxqaEo/FJFWf6iBO5adQmVFWfQuAd+BMvn/FbjK9b22bCZ7x7J3Q2WifdQQ3O8CLnN97yFd+IgFgGjM4tcoP39CmOucZCq9ZDC2KJYc0Hn0LvJQbvgKqglF1xAR6upRLoDvyM//Bs4NihANJE6zaWPTZs0NrAFj5RhFdw/5UcKAtpJ1OlJj7B2iPWzWNMRWjdlvkiNUu8/F4IeCVl9GPI0RQWw8qiRsveBjHMqvnUSlq5mCWkbmt0M0+E0GPjYKrjbI+w0m3rJp8oPN4ibvt0GZ3reR+RlDd456Rp55jRzLgUVBhlCpLIpGEN8BIlCPl7l/EXgmF9MPud6uqFz6USKwvNzQlH4l7H4FCAFboVyAU2W+KlGF314F/uv63tv5mLdj2QkRUoMqtGtR7ZWfdH1vaX8z/yEpAGjm4npU3mSQsjUXFTi2drCZuLUFUCHCSn+2pe0CDnZ975khZtmpRjXUuEh+fgL4UjKVfmWwCHdRGOu0WXOrhKBW0m3erNRwk9FeO1B+wM4F82ZnSj2WLYTx55yHabPmVhr4CKOBnYKPTsFN54J5s7uGEz5yNUfT5ySZSmcK+X8phIBsvwMFnTN/RmOioal7/FEZf5ggYPjya2WO2nSlKg/z1xv8BDShI7DKlsLnf9GtDwIwZ+b0vOcF5wwpAUBj/luhKvpNk58uQAWMbRyszF9bCNViptyD8nde60T56Ba6vpcZaniW9+egUpQAnkEVFHl1sOO5P4WMGPLPIZTG1TFc8RG1K2p/QzFMu1hGX6ggEIXxl/L/OuOOIAhsj3JLbo9y72yPCjadN2fm9A+GlAXAYAhbiza4G8p0ehEqUIyhwBQcy65HdYvaTwSAcjHmhGgvDwH3u77XNoStPV9EVexCBJrPBu6AwWAJiCGGGGLoC9OWcxNCtyuyvCZQbprtsxwBw89Wm2ETcN6cmdOvGTICgMH890AFTOyG8k1dkkylLxpICbVA5l+JSie7qp9vfaTre48MZc2kpbnxDFT8RBLlN5uZTKVfjoWAGGKIYTAzdjm/EhVvUgNUa++Dz3WomJWJqEDDSagg1m3kdVtCujeGwGY52kKOVwFnzszpC4eEAGAw/4OBG1BBGOtRZWOvHCrMXwSAOpQ5+4x+vvUs4Heu73UO5U3X0tx4JnAFKkDodeALyVT6mVgIiCGGGAZIawcV1JtEBfkmtc8j5ahHBQiPE9qlHxNEq88HHcL31qMCT9fTMzh1PSrrJMg+WSnHcmDVnJnTV4Y9X9VgRoTG/A9D5WJORUVOzkqm0jcNJeavQfUA3HMUQ7zmgzQK+mNLc+MmEaJ2B25uaW78cjKVfrgUzZ1iiCGGmKkH511064NB5khwjBEmPkY+jxbamtRe6+UYJb9Habi3Rpj1Knm/SvtuNSoFcx3dKad6KvD6OTOnbyx2HgYtU9BMvwej6vrvLA99SjKVvmcoMv+BKgSEampx+VCoA5BvPcj741G1tqtROcnnJFPph2JLQAwxxMw9wvmB5r0N3XU7JoqWvg3ddSNqtWMEylxfJ++jKM9rUfUflmmvwfugXsdmlG8+9HXOzOllTd0elBYAjfnvi6pbP04m6zSkreMQ1vYGIhe/hfJnHJQV9CDPZCr9D+n5cD8qm+L3Lc2NZyVT6cfL3Uo4hhhiGHwa+0W3PlgrTDzwlwc+9ImowLiJcmwlWnkFKg1Uf62KqLGvRFUVXCTHEnldjKqOuES08x6ppMb7rjkzp2dKPReFwqCyABha3lRUSd+tRGL6UjKV9ofyopb8z28Bl/UTQw7wOz2oMjUcQBMQjxIhoAIVGPj5oE5AbAWIIYbBy9xD+E/Y+0rRxrcT5r2Dxti3k/fBa5Ry3BnjVX/fJkpmwMQXGe8XAR/MmTl9bdRnLBfTHpYCgMH8DwQeRPlSFqPq+t8+HEy8jmWPQ5nkp4o02FUGYSCQcBNiQbne9b2Nw4mIaELAscBtKP9bM3BiMpV+OyazMcTQf1q49p+gwJJeAMt8DSLegyj3SfI6UXu/Lcr0ng+6hHkHRbQ6tKMTZUoPSjEvltcl8rpImP6iOTOntw8nxj6kBACD+R8hBH2SIOqbyVT6tmHC/MMaRGyLCmgrFXQB813f25Tv3sNIGP
gCKrVyLHBTMpU+PSbfMcRQMqaeoLsLZp0w5jrjuxGowDc92j14P47ussz1EW+7ie5Sy8H7Tcb3QaS7GfX+4XdRzezDkbkPKQuAxvxvQOX5LwW+mkyl7xzOwV2OZT9Bz+YQpYBfuL737eG+eA3B8WxUw46bkqn0pTGpjyGG3oxcZ4gX3frgSGHa9SgLWtAjoV77Xn8/KuR9EAkfBTpR0e3rtdcgwt18HzRWMhstrZszc3pLuYWeWADof2K+D6qj0l5ilpmZTKUfHObMv1qk1foSX/pp1/cO3hIWsOYKqEK16H0vmUpvjrd2DDGEMsNPoIqRjTa0d/39VmTvWR8GgfYd5KCvpjsnfZX8FjRS2mho9EFTpU1RzfAxYy8dDKYsgAnC/FsC5h/8MIwDuqpFKi41VGwpC1jLDugA3jAtAzHEEEMP+AYwI8J5esraMmHoi1Em9uXyfg3Z/e8f+uHLlcoWM//hJQA8DhwtC+7FLYGQu763IegCVWLYsCUtYnONxMw/hhiywu+Eaa+hOyguCIwLguP0viGZbK8F5t7HDDsWAMJBGP1m4D7ju2E78VpQ3u3AyYSnpkQFM4XmhnhpxxBDDCFM+G7g7v5m1jHzH5yQiKdgQIWAhOt7GceyR2iMP6hEFdVslhHJfS3K9N/m+l5ncO14lmOIIYYYYhi0FoAtGDIAesqeY9knAA49zXC5BLi1wGzX9/5lWBdi5h9DDDHEEEMsAAxGyJKXvwiVzx41M+B14N0I140hhhhiiCGGD6EinoJBB2lUo5uoFoSrXd97eUuesDIFUhZ174EcS75x6mMbrOMcKuOLIYaBWqNh9433Sx8RNRgmMBiDY9mVjmVf5Vh2JsfR4Vj214baAihknMU+U3/MRS7GP5hwETLOmlKPs5zPK/0zhvz6NgWwGIYuLwnZU9X9tBdKvu8SwxVJgRncseygy1MtkAJ2RdUcqEVVlnof+C/K9N4JdA6kCT1Aqut7OJa9D6p50NGo2thdwEKxEvzC9b0V+vmDmSDK8wS4SAL7AZNRVcRqUdW+WlABjW+h0pHaxcrRJXjJGNceAdwFfFz7eqbre3/pp2f7M6CXHf4pMNf1vbZBhoPTUKWSx8hXy4C9XN9b3dc9JvitFhxOQfW4GC33Wo9KSV2OclO9g6rN3qUf+toVYvptwNVud5vre58fQvQnBfwPypXXBdzn+t61w4CuBiWBs8UXdbi+1xG17LhcL1e9/07X99oH6VychCpcFzQhWgxMdn1vc5nvWyX7qoZuC/7Bru89Xcz1hl0MgMH8xwHHA18Bjszz10Wo9LmbHct+WSLp+52xamPH9b0XgXOiPOsQwMV44FhUIZJDIl7ifeAVVKe/u4F7jN8rgH2N7z4G/KWfHvFA4/PeqCpqbYMMFRM05o8IYAcAD/SR+dcB04BvoorL1ET4+1rgZcHrA6i+H3qjqkoRdnX4+BAjQ/8LnKF9nupY9i2u77UOcfLaADwl61tnzAEz+iXwnQJo0pGopm8ZYw0ggsathoA9mGAsPTsQbiv7f0GZ75uUudFhT8eyn3F9r+CCS8MuBkBjOIcBdwB/iMD8QbWVnA08BHzdsexqTcMZyOfIal4e7Mxfe3+AMOU/F8D8QbX/PFo0wm9l0xKMz+/142N2hDC3zkGIDrPjZKeMtS/MfzxwIfAoMDMi80esA4cAZwPfQ3WA0yETomGuHGJkKGE8Q9swobU7aQx/pHZUCzM8xbHsPQugDefJXFUY1xsp1xs/iOcizAqyZIDuu44iO8oOKwuAtrAOBm5E1YYvFMYBV4pE94PBItAMBaYfNnbHsvcTISzVx8sti3jemgF85NZBKgCEEZH1fdT8LwfO6uM4AhdBlPOGElTR071aQ/S6HoMZ1kUQ7o4AXs1lnZQ1lAA+E0FwHUqwvJ+ES0IUj1gAEKgDrs7C/O9FmR3fQZmwxglj+qxoIhltgr/vWPaLru/9eTi30i2XICabvB6wszD/N1H++zdRzUMCLWAEylw9TnAStBF9d5Bqevk251DSYKIKo18DTstCiO4CmlFNYDYLTmsEp+MFr9ui2n1/EIGpDEW4W+jPSJSV6M5hYP6PwpDrRfm6NgK9PBLlLhtO0B8Cy4ZS7OVhKQAI0zkb5ZfUoV2kzSeEKLVrQWm1wMUoc+bZxv+uEN9dR8zWi4KPinClQyfwM7GyBL7ETmGegTmwAmUC1I/B2OGvZUtDqGPZk4HPo8y+OtyFiu9YquE0I7hMhOC0Ss7ZNAyn6WZUoG4Fmn97mCoSrfTsHLiXY9nbub63KJdyAJwaYl0YQXRX0mCEsisAru+1ldItPRwtAE7Id590fe9hcwNK0MRG4H3Hsr8nGsoJ2v/GowIIrzYC2kI3cpQNXkCELEZ0dJgmRrYsgFzZAcWOoUBBbATKf29K+Ve5vvezkPFlNAGh3y0Whc6hJliW7H755jxX/Ec/MpeDgY8Y3/0XOMn1vfaQcXSWE2/55qqY9V/oXBqZLkgWSFs23EW5X641Wcgz6dkaRazvKHAcKjg3YNx7oDJBFuWxIp1k7KMfAP8POKgv+M9FJ8tJ8/oy1gHcy8MuBmBfYGfj6z8CT+UjFq7vLXcs+1rgGLqjLCtEUr3aEBwCBhf49hLABskcCNLcRmjz2ymCxvoguyAfcZJ7VIpZrVbutRWwybHsNtGcWoOUsxDhJrjeCNHWuujuFbBZrB8jUS6TGnmGLiFcLa7vbcxGOCLCWGC68d164PwSEJ0+bUB59gBH1TIPnY5lbxRitFHmoCgToGPZW2nzWilz2ylWjPXZcGaMsUrDe7WspSp53+5YdofgqrUEuIo6h1Wo7IEqg8FfmIX5lx2fWipZjTZXFYLbhOC0Q/bLetf3uvKNUcPBSNlzespVF92tbtu0/aTThmrBf2AS7nR9b2M+IUXWTXC/WqDGsewWuc9G1/da9MDkEJpUKfcNNNG2IC1Nu0e10JQa1/dKEbT2KvCcCIagXDtT0Rq7hTz3AaieJwG8hnLL1hSCe20uAjxVa/uu1bHszYL3Ftf3OqMIjRFxv1nuU5BSpOE4CJwcAVQJjtuFh7T2x15GY3DDCayQ725xfW9zxMl8XhazDlNkMZjwQ1Sg4dXyeoxj2QeiIlv/jvJtvyfHm6iMhK+KeSxUKzAW4K4oX+vtssneRaVOvS3HXYDjWPa+jmVXhGUsyOdzgT8B81DBeOc7lr0zKk3pennm9+T676PaMl/iWPbBfcRFvTALHW6X5kelxHlnVIIh76eIpvFn0Vzfl/l9A+WTflpweopj2Vvn094MmIRyeVwO3C9ETZ/b+4DZjmXvpWuMpmDpWPZ0VObDr4E7ZU1+INd7Q17fB/4DeI5lH+pYdmU/ZK2MQ9Vv0KHF9b3b+1uok2fdCxWIeBFwE/Awqk7G+zJPr8v7l1ApZWc7lj0hG061QlwjHMs+HvgN8Ixc410Njy8LLv8A/MSx7KTx7KfInrsauBaYExQ0ymI1GOlY9ieBK2T/fSA04xW537PAbx3LPtGx7PHZ6AewO3CNjOsPwI+1+yVlT9uAL2u+FDBClKweViLHs
kfnsGCdZJjLX5KjosC9PEH2229Q7t0PBO8vyR55GZVm+j+y78m2R/S4JceyPy308ukQ3L8o8/c/hViHHMuucyz7WOAX2lgXCh4CujPPsewTHMuu768MtOEmABxhfF4szDKSdCaLxjy/ht655ogmezwqT/UEIfr/kk18lCEhjkDlp/8GuMGx7Km5zD6OZX8GVWTiKuATqOhac0yHoHzpdwNnOZZdFbJoxgCNwInAmahc7QtQ/skbZPNsY1x7N1Re9x2OZX+mmIUo50+id5GP+0vMKDJEiyIPxvU5VDri7wVnk0JO21lw+hfgesey9y5gDj4nxPBcVOCj7idPAPsLUU4LkQmDycA/gUtRMSmH0zsvPrjeVODrQpDO6getYbQwGR2eK1BIKiX8VoTY74gpeq8smtk44NPCjK91LHtc2DwJnmtl7u8QIXnHLNat/WU/fT+EPnxB9t3pqHiJE0P2cHC/rUWAuVsE/ikh99sBFXT5d+DXjmXvkQXPO6FSMmfI8QPHsrd3LPtEYWj3yL0OBkaZQkmRUCNMVofDQuiKvjaP1XhPuwhZK4jgQ9do5H6C+78KnnbNgqdjgF8JPZthFLAymf8Ewf3dQi93DrnmeKG9+xVgpRolfOFO4KtZxrqz3DMtON62P4SAYSEAaJNkapxvUEAKkZiJzDzyWsIzCszgpb1RbXzzwTHAhYEkH6K1f1II27SIw95eNMXjpQWwySRMP/VIoqXkTQKucyx7bBFMpUKYkwnNZUB/JuImnFHgvCIC3q2OZW8dcTNGJai7A5cGgqBx3cDVUyhj/gkq6LKcRGMk3bngPXA6QMFtY4r4zwkiOGerr3GhWPEKoY3mHjODhjdgxAQEmj+qeuS3Clg7p4rVZ2LI2gmzht0igs8ZhhCSoWfwXtE8xPW9ZcCThhKxS5b9OMVgrMtQtVeqC6D324ul47gCxrk38DsRPsLcJxWC+3NLaaUSuE6uW0W0iP0zRVGsyBYHFgsA4RM9wfhpCb0rTOUTIt4z/lNF72Il+YjBvwXpf0GlQ5lwomj2PQJ0HMueKJaFHYzzfdFg9gY+JeZFHbYCfi6aTlR4WyTdwFz4eMg5E1FBkMWsqzDp+Y2BWBtStOanIfPzKjBXNK/zhKiYBXL2FQ2iEPhA5vZ6sQi8FHJOSrQ1k3muQLmU7pL//lLGeAHwXdF2Lxbrlg47IjEXZWTGW9G7CtnCAdz6N4vV7Tax6lwu++B7MldOiHYKyhVXHcIEpsr/TOGyHWXyv1mY6Y0o19y9YklrDrHO5BRUJXbhKGCWcd5yYA4qgPZosRiZ+eWNYlmIgusjCC+ok3F9b20JcfFn4/PHs/SdOJqenU6XuL73RBQBQIqi1ch+PThEIfuV/PZlEfLeMs7ZGvieY9mTQsZ1HL0zlkC5ES6WNTVXhIi7yZPzr5n+v25cN4GyNM8SK9JBqMqR7xiXOFr2uukuKmnc3rBxAYgpzXyeFiKWZdU20gpDu68SU1I+eFG0i9GolMMgV3p7IeYZ45rTg9gC7d4Hidapw2Wu730GuFe6/v0b5fc0Ccc+wKFCWAJoC3n+tcAZru9NFkLyFTk+LgzJlFA/X4T0mQgzAZLFXB/12kW6IkCZZHczfv4LKpr9AmH880RKP5zecSDHO5a9WwRi+yCwp9yrEVXG+cuu7+2DinI2UxkPF21Gh0Uo0/9JQsi+C/xIiNAvUOmTc4R4mLnlu0vthXJZ2cL2waK+XrcPmo0ne+502Q+OWEKukLm6TDRf8wY1aNHm2v2/adCQhKyT7VHxRWfKGjlb1tRxwFeDAEgNWnNpenJujSFsADwia+cSlLvsftFK96J3lbkTHMversD52iwC09wQa2lf4e/GZwsYERKw+DHN8tAh2n82oSmbEGpWBX1XLA7ny16+QQSAKUCTKZgAqZBxfcKw4HaKsLcfKt7rCtmHX5c1d0FEfjLb+OkJlBX4atf3ml3fe9b1vcuBkw1FoQIVs2JadUeWEmnDKQtgTMgi2kjhaUjrQ0x6UQSl3wL/DILcNM2+07HsU2QD68T5QNFIW4Uo1AVWAQ2ed33PlsWakYUVBNFdjQpmazA0A13Y6AohRItd3wuk9XZtcXU4ln2PmAy/YJjOitEqTZ/nujxaeo0wNVNgqZZN/47re+8UaRk6ip5m9XdRkevrTZ+g63svOpZ9qWz+wJ9ci/KpXpbnls8B7+kNQbTrXiLrQO8fsAfKv/+BFtmcCTEh92IgkrXygCEw1hdiSi0QElmIT0uOcVai3B1mA5mE7NeVwIvF1DCXOe3Mtb9lnjY7lv2kCPZbG9Ytc52cZFxiPvA91/eWF7DWyEdzZL2NC9nvZ+lrUnuGVZKmrAfbHSJrJ58A1orysf8B1VRpvTE/pVofq4DHUP5/RMAY7/reOu1+O9AzxmEzKtaiEGveoagGYjqc5vre0rBUbceyZwpN1+ORjhPBY7Ocu6PQHtN6fI7re11ZUvjy1iWRoL9xhvJ1qet7r4fEIDzrWPaNYvGp0SwWR6CCW8sCwykIsCaL2a3QKkltFFfRKShw8uFi0YSADcDfjPOnGkwyCRxqnPN/YQRG3idE2tXhoxHGuTLb5nd9b40Qix5rJCyiNwKzqMhlAg2Bj6IajTxnHE/LZv1jkRrmtqg+Dz00Btf3XjBxZRD+VwxBOYp/vcIUQg0/7a3G+TuQJW4k1320sS4K0Y76W6jPVQxplGi1CwycLkBV5Py7aLdlAW2e1tM7Fsg0yW8Xwlj+VajQWQAcbXx+EliZLZ3X9b0/GYJFPb1dnmFwuut7R7m+d10gXJSpg2hbiBWg0VjLB9AzNmCV63v/KfA+ZmOo94HnwuZNvmsPETIOMfbJtiKM63BHNuZfgOXx48Z9npf9kA3+aVh6RhIeSxVbAEIgTGsqhpHXlFIw0hbP3fSsnT5amL5OvPc2/r4g20aVxflf4+sd85jSMoHFIUcWwlK6awYEzHwChTWPCdNiKyOY9vIRmGJggiFodQbzmo3YClFZbgg0uxRLNLX/PB4iMIzVz9FznOVzlWjL9bJekmKZqKSwgMZSQNh+qouwFnIJD0WXTw2po5DU9lWS7piFBvLHx+wSsnfeLoOmHIDpw14IrM1zn3dRWSIfMi4JFMs1h805LBWlFLY6HMt+yPh6JnClds9phtDyzyJuZQqMT6PiGXLtuycRV6bA7qj8++Cc8Sg3jw73lmC/72/w2BWBNSkL3XmNnm7SGsIzlWIBIATCTMy1RTDzMC2qFDWenw/5bry2CKvpHdU8x7Hst7Iw9QS900mimH8TeRbtBjmS2vmjinheU2BI6u6REChXSdikIVx0IcFBOZqVbHQs21xPdRI41pf+5GG51+PEz5cxTJiHofyouwnzGqUJAUExkZEhQnBZ6pHL+MLq2Y/uw2W7yOPuyMH8gzkbK8T9QJRZf7TMUz3dBWJGRRBAxxu0Yi2qR0W5mKaZTtkAXClFvrIJUhND9ns+33l/0vhFwmwDS+ShkkW0WlwepsD6x3x0KQS2CRGK8rl5zeDjsajsBf1zFHpdKJj42t+x7Muz7NGMrNNJ
# Glaciers in a changing climate
As we have discussed, glaciers change their shape in response to long-term trends in climate. If there was no long-term trend in the climate, that is if the environmental conditions were more or less stable, a glacier could eventually reach an equilibrium with its local climate.
In this notebook, we will investigate equilibrium states for different climates. We will also see what happens when our glacier experiences an abrupt change in climate and when it experiences a long-term climate trend.
First, we have to import all the necessary modules:
```
# The commands below are just importing the necessary modules and functions
# Plot defaults
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (9, 6) # Default plot size
# Scientific packages
import numpy as np
import pandas as pd
# Constants
from oggm import cfg
cfg.initialize()
# OGGM models
from oggm.core.massbalance import LinearMassBalance
from oggm.core.flowline import FluxBasedModel, RectangularBedFlowline, TrapezoidalBedFlowline, ParabolicBedFlowline
# There are several solvers in OGGM core. We use the default one for this experiment
from functools import partial
FlowlineModel = partial(FluxBasedModel, min_dt=0, cfl_number=0.01)
# OGGM Edu helper functions
import oggm_edu as edu
import CdeC as cdec
```
## The essentials
We follow similar steps to what we did in [glacier_bed_slope](1_glacier_bed_slope.ipynb) to set up the basic model.
```
# define horizontal resolution of the model:
# nx: number of grid points
# map_dx: grid point spacing in meters
nx = 200
map_dx = 100
# define glacier slope and how high up it is
slope = 0.1
top = 3400 #m, the peak elevation
initial_width = 300 #width in meters
# create a linear bedrock profile from top to bottom
bottom = top - nx*map_dx*slope #m, elevation of the bottom of the incline based on the slope we defined
bed_h, surface_h = edu.define_linear_bed(top, bottom, nx)
# ask the model to calculate the distance from the top to the bottom of the glacier in km
distance_along_glacier = edu.distance_along_glacier(nx, map_dx)
# Now describe the widths in "grid points" for the model, based on grid point spacing map_dx
widths = np.zeros(nx) + initial_width/map_dx
# Define our bed
init_flowline = RectangularBedFlowline(surface_h=surface_h, bed_h=bed_h, widths=widths, map_dx=map_dx)
# ELA at 3000 m a.s.l., gradient 4 mm m-1
initial_ELA = 3000
mb_model = LinearMassBalance(initial_ELA, grad=4)
spinup_model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0.)
# Run
spinup_model.run_until_equilibrium(rate=0.006)
# Plot our glacier
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Glacier after {:.0f} years'.format(spinup_model.yr), color='LightSkyBlue')
edu.plot_xz_bed(x=distance_along_glacier, bed=bed_h)
plt.xlabel('Distance [km]')
plt.ylabel('Altitude [m]')
plt.legend(loc='best');
```
## Equilibrium state
A glacier is in equilibrium with its climate when it accumulates (approximately) as much mass as it loses each year.
What do you think: does a glacier in equilibrium change its shape and size or not?
**Discuss: what does the "equilibrium line" mean?**
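One way to see the equilibrium line is to plot the annual mass balance along the bed and look for where it crosses zero. The cell below is a minimal sketch: it assumes, as in OGGM, that `get_annual_mb` returns ice-equivalent meters per second and that `cfg.SEC_IN_YEAR` holds the number of seconds in a year (treat the unit conversion as something to verify).
```
# Sketch: annual mass balance along the bed; it crosses zero at the ELA
annual_mb = mb_model.get_annual_mb(surface_h) * cfg.SEC_IN_YEAR  # m ice per year
plt.plot(distance_along_glacier, annual_mb, color='C2')
plt.axhline(0, color='k', linewidth=0.8)  # positive (accumulation) above the ELA, negative (ablation) below
plt.xlabel('Distance [km]')
plt.ylabel('Annual mass balance [m ice per yr]');
```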
### Activity: formulate a hypothesis.
_The effect of the ELA on the volume of the glacier in equilibrium is ..._
The results of our study in [notebook 2](2_mass_balance.ipynb) can inform your intuition here.
Now we can modify the ELA by changing the value `test_ELA` in the cell below. What happens when we lower the ELA? And when we raise it?
```
test_ELA = 3000
test_grad = 4 #mm/m
test_mb_model = LinearMassBalance(test_ELA, grad=test_grad)
test_glacier = FlowlineModel(init_flowline, mb_model=test_mb_model, y0=0.)
test_glacier.run_until_equilibrium(rate=0.006)
print('After {:.0f} years, test glacier stores {:.3f} km3 of ice.'.format(test_glacier.yr, test_glacier.volume_km3))
## Plot the glacier to explore
plt.plot(distance_along_glacier, test_glacier.fls[-1].surface_h, label='Test glacier after {:.0f} years'.format(test_glacier.yr), color='Blue')
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Basic glacier after {:.0f} years'.format(spinup_model.yr), color='LightSkyBlue')
edu.plot_xz_bed(x=distance_along_glacier, bed=bed_h)
plt.xlabel('Distance [km]')
plt.ylabel('Altitude [m]')
plt.legend(loc='best');
```
**Do the results of this experiment support your hypothesis or not? Discuss with a partner.**
### Response time
As we discussed, glaciers take many years to respond to a change in their climate. Test the cells in the "Equilibrium state" section again. The model reports: "After # years, test glacier stores # km3 of ice". Is the number of years the same for each new ELA you try?
The time it takes for the glacier to reach equilibrium is called the "response time". Response time depends on a number of factors, including the shape of the glacier and the climate it is adjusting to. You can read more [here](http://www.antarcticglaciers.org/glacier-processes/glacier-response-time/).
One consequence of response time variability is that different glaciers will respond differently to the same change in climate. We will learn more about this in [notebook 5, "Differing reactions"](5_differing_reactions.ipynb).
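As a quick experiment, the sketch below reuses the objects defined above to time how long the model takes to reach equilibrium for a few ELAs (the ELA values here are arbitrary examples):
```
# Sketch: years to equilibrium for a few example ELAs
for ela in [2900, 3000, 3100]:
    mb = LinearMassBalance(ela, grad=4)
    glacier = FlowlineModel(init_flowline, mb_model=mb, y0=0.)
    glacier.run_until_equilibrium(rate=0.006)
    print('ELA {} m: equilibrium after {:.0f} years, volume {:.3f} km3'.format(
        ela, glacier.yr, glacier.volume_km3))
```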
## Abrupt change in mass balance
We have seen the equilibrium states for various equilibrium line altitudes. Now, we are going to investigate what happens when there is an abrupt change in the climate.
We know that the glacier takes years (centuries!) to reach equilibrium with a new climate. How will our glacier change before it reaches equilibrium?
We are going to do a simulation with a change of climate, which we apply at a specific time (`change_time` below).
```
# Set-up based on model we used for spinup
model = FlowlineModel(spinup_model.fls, mb_model=mb_model, y0=0.)
model.run_until_equilibrium(rate=0.006)
# Time
yrs = np.arange(0, 201, 5, dtype=np.float32)
nsteps = len(yrs)
change_time = 50 # when in the simulation we want to apply the change in climate
# Output containers
ela = np.zeros(nsteps)
length = np.zeros(nsteps)
area = np.zeros(nsteps)
volume = np.zeros(nsteps)
# Loop
current_ELA = initial_ELA
change = -200 #m, the amount by which we want the initial ELA to change
for i, yr in enumerate(yrs):
model.run_until(spinup_model.yr + yr)
if yr >= change_time:
current_ELA = initial_ELA + change
model.mb_model = LinearMassBalance(current_ELA, grad=4)
ela[i] = current_ELA
length[i] = model.length_m
area[i] = model.area_km2
volume[i] = model.volume_km3
plt.figure(figsize=(9,6))
# Get the modelled flowline (model.fls[-1]) and plot its new surface
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Initial glacier after {} years in prior equilibrium'.format(spinup_model.yr), color='LightSkyBlue')
plt.plot(distance_along_glacier, model.fls[-1].surface_h, label='Perturbed glacier after {} years'.format(yrs[-1]), color='C1')
# Plot the equilibrium line altitudes
plt.axhline(ela[0], linestyle='--', color='LightSkyBlue', linewidth=0.8, label='ELA initial glacier')
plt.axhline(ela[-1], linestyle='--', color='C1', linewidth=0.8, label='Changed ELA')
plt.title('Grad={}'.format(model.mb_model.grad))
# Add the bedrock and axes labels:
edu.plot_xz_bed(x=distance_along_glacier, bed = bed_h)
```
## Slow and linear climate change
Now, we will apply a slow and steady change to the climate of our glacier, and examine how its volume changes over 200 years.
**At the end of the simulation, do you think that the glacier exposed to slow climate change will have more or less change in volume than the glacier exposed to abrupt change?** Why?
We define and run our simulation:
```
# Set-up
initial_ELA = 3000
mb_model = LinearMassBalance(initial_ELA, grad=4)
linvar_model = FlowlineModel(spinup_model.fls, mb_model=mb_model, y0=0.)
linvar_model.run_until_equilibrium(rate=0.006)
# Time
yrs = np.arange(0, 201, 5, dtype=np.float32)
nsteps = len(yrs)
# Output containers
linvar_ela = np.zeros(nsteps)
linvar_length = np.zeros(nsteps)
linvar_area = np.zeros(nsteps)
linvar_volume = np.zeros(nsteps)
# Loop
current_ELA = initial_ELA
dH = -10 #m, the change we apply at each step
for i, yr in enumerate(yrs):
linvar_model.run_until(yr)
current_ELA += dH #modify the ELA
linvar_model.mb_model = LinearMassBalance(current_ELA, grad=4)
linvar_ela[i] = current_ELA
linvar_length[i] = linvar_model.length_m
linvar_area[i] = linvar_model.area_km2
linvar_volume[i] = linvar_model.volume_km3
```
We make a graph of the volume of the glacier throughout the simulation:
```
f = plt.figure()
ax1 = plt.axes()
ax1.plot(yrs, linvar_volume)
ax1.set_xlabel('Years of simulation')
ax1.set_ylabel('Ice volume [km3]');
```
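To compare with the abrupt-change experiment, we can overlay the two volume series (a quick sketch reusing the `volume` array computed in the abrupt-change loop above):
```
# Sketch: compare the abrupt and gradual ELA-change experiments
plt.figure()
plt.plot(yrs, volume, label='Abrupt ELA change')
plt.plot(yrs, linvar_volume, label='Gradual ELA change')
plt.xlabel('Years of simulation')
plt.ylabel('Ice volume [km3]')
plt.legend(loc='best');
```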
## Difference in water storage
Now, we use the `ice_to_freshwater` function to convert the volume of ice that our glacier is storing into liters of fresh water.
```
water_vols = [cdec.ice_to_freshwater(v) for v in linvar_volume]
f2 = plt.figure()
ax2 = plt.axes()
ax2.plot(yrs, water_vols)
ax2.set_xlabel('Years of simulation')
ax2.set_ylabel('Water volume [L]');
```
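The `ice_to_freshwater` helper comes from the course's `CdeC` module, which is not shown here. A conversion along the following lines (assuming an ice density of roughly 900 kg/m3) gives the general idea; it is an illustrative sketch, not the actual implementation:
```
def ice_to_freshwater_sketch(ice_volume_km3, rho_ice=900., rho_water=1000.):
    """Approximate liters of liquid water stored in a given ice volume [km3].
    Illustrative only; the real conversion lives in the CdeC module."""
    ice_volume_m3 = ice_volume_km3 * 1e9                    # km3 -> m3
    water_volume_m3 = ice_volume_m3 * rho_ice / rho_water   # conserve mass
    return water_volume_m3 * 1000.                          # m3 -> liters
```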
What is the significance of this result? Discuss your findings with a partner.
|
github_jupyter
|
# The commands below are just importing the necessary modules and functions
# Plot defaults
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (9, 6) # Default plot size
# Scientific packages
import numpy as np
import pandas as pd
# Constants
from oggm import cfg
cfg.initialize()
# OGGM models
from oggm.core.massbalance import LinearMassBalance
from oggm.core.flowline import FluxBasedModel, RectangularBedFlowline, TrapezoidalBedFlowline, ParabolicBedFlowline
# There are several solvers in OGGM core. We use the default one for this experiment
from functools import partial
FlowlineModel = partial(FluxBasedModel, min_dt=0, cfl_number=0.01)
# OGGM Edu helper functions
import oggm_edu as edu
import CdeC as cdec
# define horizontal resolution of the model:
# nx: number of grid points
# map_dx: grid point spacing in meters
nx = 200
map_dx = 100
# define glacier slope and how high up it is
slope = 0.1
top = 3400 #m, the peak elevation
initial_width = 300 #width in meters
# create a linear bedrock profile from top to bottom
bottom = top - nx*map_dx*slope #m, elevation of the bottom of the incline based on the slope we defined
bed_h, surface_h = edu.define_linear_bed(top, bottom, nx)
# ask the model to calculate the distance from the top to the bottom of the glacier in km
distance_along_glacier = edu.distance_along_glacier(nx, map_dx)
# Now describe the widths in "grid points" for the model, based on grid point spacing map_dx
widths = np.zeros(nx) + initial_width/map_dx
# Define our bed
init_flowline = RectangularBedFlowline(surface_h=surface_h, bed_h=bed_h, widths=widths, map_dx=map_dx)
# ELA at 3000 m a.s.l., gradient 4 mm m-1
initial_ELA = 3000
mb_model = LinearMassBalance(initial_ELA, grad=4)
spinup_model = FlowlineModel(init_flowline, mb_model=mb_model, y0=0.)
# Run
spinup_model.run_until_equilibrium(rate=0.006)
# Plot our glacier
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Glacier after {:.0f} years'.format(spinup_model.yr), color='LightSkyBlue')
edu.plot_xz_bed(x=distance_along_glacier, bed=bed_h)
plt.xlabel('Distance [km]')
plt.ylabel('Altitude [m]')
plt.legend(loc='best');
test_ELA = 3000
test_grad = 4 #mm/m
test_mb_model = LinearMassBalance(test_ELA, grad=test_grad)
test_glacier = FlowlineModel(init_flowline, mb_model=test_mb_model, y0=0.)
test_glacier.run_until_equilibrium(rate=0.006)
print('After {:.0f} years, test glacier stores {:.3f} km3 of ice.'.format(test_glacier.yr, test_glacier.volume_km3))
## Plot the glacier to explore
plt.plot(distance_along_glacier, test_glacier.fls[-1].surface_h, label='Test glacier after {:.0f} years'.format(test_glacier.yr), color='Blue')
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Basic glacier after {:.0f} years'.format(spinup_model.yr), color='LightSkyBlue')
edu.plot_xz_bed(x=distance_along_glacier, bed=bed_h)
plt.xlabel('Distance [km]')
plt.ylabel('Altitude [m]')
plt.legend(loc='best');
# Set-up based on model we used for spinup
model = FlowlineModel(spinup_model.fls, mb_model=mb_model, y0=0.)
model.run_until_equilibrium(rate=0.006)
# Time
yrs = np.arange(0, 201, 5, dtype=np.float32)
nsteps = len(yrs)
change_time = 50 # when in the simulation we want to apply the change in climate
# Output containers
ela = np.zeros(nsteps)
length = np.zeros(nsteps)
area = np.zeros(nsteps)
volume = np.zeros(nsteps)
# Loop
current_ELA = initial_ELA
change = -200 #m, the amount by which we want the initial ELA to change
for i, yr in enumerate(yrs):
model.run_until(spinup_model.yr + yr)
if yr >= change_time:
current_ELA = initial_ELA + change
model.mb_model = LinearMassBalance(current_ELA, grad=4)
ela[i] = current_ELA
length[i] = model.length_m
area[i] = model.area_km2
volume[i] = model.volume_km3
plt.figure(figsize=(9,6))
# Get the modelled flowline (model.fls[-1]) and plot its new surface
plt.plot(distance_along_glacier, spinup_model.fls[-1].surface_h, label='Initial glacier after {} years in prior equilibrium'.format(spinup_model.yr), color='LightSkyBlue')
plt.plot(distance_along_glacier, model.fls[-1].surface_h, label='Perturbed glacier after {} years'.format(yrs[-1]), color='C1')
# Plot the equilibrium line altitudes
plt.axhline(ela[0], linestyle='--', color='LightSkyBlue', linewidth=0.8, label='ELA initial glacier')
plt.axhline(ela[-1], linestyle='--', color='C1', linewidth=0.8, label='Changed ELA')
plt.title('Grad={}'.format(model.mb_model.grad))
# Add the bedrock and axes labels:
edu.plot_xz_bed(x=distance_along_glacier, bed = bed_h)
# Set-up
initial_ELA = 3000
mb_model = LinearMassBalance(initial_ELA, grad=4)
linvar_model = FlowlineModel(spinup_model.fls, mb_model=mb_model, y0=0.)
linvar_model.run_until_equilibrium(rate=0.006)
# Time
yrs = np.arange(0, 201, 5, dtype=np.float32)
nsteps = len(yrs)
# Output containers
linvar_ela = np.zeros(nsteps)
linvar_length = np.zeros(nsteps)
linvar_area = np.zeros(nsteps)
linvar_volume = np.zeros(nsteps)
# Loop
current_ELA = initial_ELA
dH = -10 #m, the change we apply at each step
for i, yr in enumerate(yrs):
linvar_model.run_until(yr)
current_ELA += dH #modify the ELA
linvar_model.mb_model = LinearMassBalance(current_ELA, grad=4)
linvar_ela[i] = current_ELA
linvar_length[i] = linvar_model.length_m
linvar_area[i] = linvar_model.area_km2
linvar_volume[i] = linvar_model.volume_km3
f = plt.figure()
ax1 = plt.axes()
ax1.plot(yrs, linvar_volume)
ax1.set_xlabel('Years of simulation')
ax1.set_ylabel('Ice volume [km3]');
water_vols = [cdec.ice_to_freshwater(v) for v in linvar_volume]
f2 = plt.figure()
ax2 = plt.axes()
ax2.plot(yrs, water_vols)
ax2.set_xlabel('Years of simulation')
ax2.set_ylabel('Water volume [L]');
| 0.844056 | 0.324048 |
## Parent Satisfaction Classification problem
Based on the student details, we predict parent school satisfaction (yes or no). This is a binary classification problem, and we also handle the class imbalance in the labels. The notebook follows these steps.
### 1. Importing Libraries
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
```
### 2. Loading Datasets
```
# Load the datasets
file = "datasets/xAPI-Edu-Data.csv"
df = pd.read_csv(file)
```
### 3. Explore Datasets
```
df.head()
df.columns
df.shape
# Checking missing values
df.isna().sum().sum()
```
### 4. Exploratory Data Analysis
```
plt.title('Gender of the student', fontsize=22)
sns.countplot(x=df.gender);
bar = df['NationalITy'].value_counts().sort_values(ascending=False)
ax = bar.plot(kind='barh', figsize=(15,8), color="blue", fontsize=13);
ax.set_alpha(0.5)
ax.set_title("Nationality of the students", fontsize=22)
ax.set_ylabel("Nationality", fontsize=15);
for i, v in enumerate(bar):
ax.text(v + 3, i + .10, str(v))
plt.show()
bar = df['PlaceofBirth'].value_counts().sort_values(ascending=False)
ax = bar.plot(kind='barh', figsize=(15,8), color="blue", fontsize=13);
ax.set_alpha(0.5)
ax.set_title("Place of Birth", fontsize=22)
ax.set_ylabel("Birth place", fontsize=15);
for i, v in enumerate(bar):
ax.text(v + 3, i + .10, str(v))
plt.show()
df['Discussion'].hist();
df['raisedhands'].hist();
df['VisITedResources'].hist();
df['AnnouncementsView'].hist();
df['ParentschoolSatisfaction'].value_counts(normalize=True)
df['ParentschoolSatisfaction'].value_counts()
# label 0 and 1
df['ParentschoolSatisfaction'].replace(['Bad','Good'],[1, 0],inplace=True)
```
### 5. Input and Target Columns
```
# Separate the data into features and label
features_df = df.drop('ParentschoolSatisfaction', axis='columns')
labels_df = df.ParentschoolSatisfaction
# sample data for our prediction purpose
sample_data = features_df.sample(1)
```
### 6. Encode categorical columns to one-hot vectors
```
# Encoder
numeric_cols = features_df.columns[features_df.dtypes != "object"].values
print(numeric_cols)
categori_cols = features_df.columns[features_df.dtypes == "object"].values
print(categori_cols)
features_df[categori_cols].nunique()
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')
encoder.fit(features_df[categori_cols])
encoder.categories_
encoded_cols = list(encoder.get_feature_names(categori_cols))
print(encoded_cols)
features_df[encoded_cols] = encoder.transform(features_df[categori_cols])
features_df.head()
features_df.shape
```
### 7. Train Test Split
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features_df, labels_df, stratify = labels_df, test_size=0.2, random_state=42)
print(X_train.shape)
print(X_test.shape)
y_train.value_counts(normalize=True)
y_test.value_counts(normalize=True)
numeric_cols = X_train.select_dtypes(include=np.number).columns.tolist()
categorical_cols = X_train.select_dtypes('category').columns.tolist()
X_train = X_train[numeric_cols]
X_test = X_test[numeric_cols]
```
### 8. Model training
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
# Random forest
model = RandomForestClassifier(random_state = 22, max_depth = 5, class_weight='balanced')
model.fit(X_train, y_train)
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
train_acc = accuracy_score(y_train, train_pred)
test_acc = accuracy_score(y_test, test_pred)
print(f"Train Accuracy = {train_acc} ")
print(f"Test Accuracy = {test_acc} ")
print(classification_report(y_train, train_pred, labels=[0, 1]))
print(classification_report(y_test, test_pred, labels=[0, 1]))
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, test_pred)
print(cm)
accuracy_score(y_test, test_pred)
plot_confusion_matrix(model, X_test, y_test)
plt.show()
```
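The `class_weight='balanced'` option above weights samples inversely to class frequency, which is how the class imbalance is handled. If it helps to see those weights explicitly, scikit-learn exposes the same computation (a small sketch using the training labels defined above):
```
from sklearn.utils.class_weight import compute_class_weight
# Each class gets weight n_samples / (n_classes * class_count)
classes = np.unique(y_train)
weights = compute_class_weight(class_weight='balanced', classes=classes, y=y_train)
dict(zip(classes, weights))
```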
### 9. Save the Model
```
import pickle
filename = 'saved_model/parent_satisfiction_model.pkl'
pickle.dump(model, open(filename, 'wb'))
```
### 10. Load the Model and Predict
```
# load the model from disk
train_model = pickle.load(open(filename, 'rb'))
from sklearn.preprocessing import OneHotEncoder
def predict_the_result(input_data, model_name):
""" Predict the result based on the input values"""
numeric_cols = input_data.columns[input_data.dtypes != "object"].values
categori_cols = input_data.columns[input_data.dtypes == "object"].values
encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')
encoder.fit(features_df[categori_cols])
encoded_cols = list(encoder.get_feature_names(categori_cols))
input_data[encoded_cols] = encoder.transform(input_data[categori_cols])
numeric_cols = input_data.select_dtypes(include=np.number).columns.tolist()
categorical_cols = input_data.select_dtypes('category').columns.tolist()
input_data = input_data[numeric_cols]
return model_name.predict(input_data)
sample_data
predict_the_result(sample_data, train_model)
```
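Note that `predict_the_result` re-fits the encoder on `features_df` every time, so prediction still depends on having the training data at hand. One option (sketched below with an example file path) is to pickle the fitted encoder alongside the model and load it at prediction time instead of re-fitting:
```
# Sketch: persist the encoder fitted during training (file path is an example)
encoder_file = 'saved_model/parent_satisfaction_encoder.pkl'
pickle.dump(encoder, open(encoder_file, 'wb'))
# At prediction time, reuse the saved encoder instead of re-fitting on features_df
loaded_encoder = pickle.load(open(encoder_file, 'rb'))
```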
|
github_jupyter
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
# Load the datasets
file = "datasets/xAPI-Edu-Data.csv"
df = pd.read_csv(file)
df.head()
df.columns
df.shape
# Checking missing values
df.isna().sum().sum()
plt.title('Gender of the student', fontsize=22)
sns.countplot(x=df.gender);
bar = df['NationalITy'].value_counts().sort_values(ascending=False)
ax = bar.plot(kind='barh', figsize=(15,8), color="blue", fontsize=13);
ax.set_alpha(0.5)
ax.set_title("Nationality of the students", fontsize=22)
ax.set_ylabel("Nationality", fontsize=15);
for i, v in enumerate(bar):
ax.text(v + 3, i + .10, str(v))
plt.show()
bar = df['PlaceofBirth'].value_counts().sort_values(ascending=False)
ax = bar.plot(kind='barh', figsize=(15,8), color="blue", fontsize=13);
ax.set_alpha(0.5)
ax.set_title("Place of Birth", fontsize=22)
ax.set_ylabel("Birth place", fontsize=15);
for i, v in enumerate(bar):
ax.text(v + 3, i + .10, str(v))
plt.show()
df['Discussion'].hist();
df['raisedhands'].hist();
df['VisITedResources'].hist();
df['AnnouncementsView'].hist();
df['ParentschoolSatisfaction'].value_counts(normalize=True)
df['ParentschoolSatisfaction'].value_counts()
# label 0 and 1
df['ParentschoolSatisfaction'].replace(['Bad','Good'],[1, 0],inplace=True)
# Separate the data into features and label
features_df = df.drop('ParentschoolSatisfaction', axis='columns')
labels_df = df.ParentschoolSatisfaction
# sample data for our prediction purpose
sample_data = features_df.sample(1)
# Encoder
numeric_cols = features_df.columns[features_df.dtypes != "object"].values
print(numeric_cols)
categori_cols = features_df.columns[features_df.dtypes == "object"].values
print(categori_cols)
features_df[categori_cols].nunique()
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')
encoder.fit(features_df[categori_cols])
encoder.categories_
encoded_cols = list(encoder.get_feature_names(categori_cols))
print(encoded_cols)
features_df[encoded_cols] = encoder.transform(features_df[categori_cols])
features_df.head()
features_df.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(features_df, labels_df, stratify = labels_df, test_size=0.2, random_state=42)
print(X_train.shape)
print(X_test.shape)
y_train.value_counts(normalize=True)
y_test.value_counts(normalize=True)
numeric_cols = X_train.select_dtypes(include=np.number).columns.tolist()
categorical_cols = X_train.select_dtypes('category').columns.tolist()
X_train = X_train[numeric_cols]
X_test = X_test[numeric_cols]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import plot_confusion_matrix
# Random forest
model = RandomForestClassifier(random_state = 22, max_depth = 5, class_weight='balanced')
model.fit(X_train, y_train)
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
train_acc = accuracy_score(y_train, train_pred)
test_acc = accuracy_score(y_test, test_pred)
print(f"Train Accuracy = {train_acc} ")
print(f"Test Accuracy = {test_acc} ")
print(classification_report(y_train, train_pred, labels=[0, 1]))
print(classification_report(y_test, test_pred, labels=[0, 1]))
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, test_pred)
print(cm)
accuracy_score(y_test, test_pred)
plot_confusion_matrix(model, X_test, y_test)
plt.show()
import pickle
filename = 'saved_model/parent_satisfiction_model.pkl'
pickle.dump(model, open(filename, 'wb'))
# load the model from disk
train_model = pickle.load(open(filename, 'rb'))
from sklearn.preprocessing import OneHotEncoder
def predict_the_result(input_data, model_name):
""" Predict the result based on the input values"""
numeric_cols = input_data.columns[input_data.dtypes != "object"].values
categori_cols = input_data.columns[input_data.dtypes == "object"].values
encoder = OneHotEncoder(sparse=False, handle_unknown='ignore')
encoder.fit(features_df[categori_cols])
encoded_cols = list(encoder.get_feature_names(categori_cols))
input_data[encoded_cols] = encoder.transform(input_data[categori_cols])
numeric_cols = input_data.select_dtypes(include=np.number).columns.tolist()
categorical_cols = input_data.select_dtypes('category').columns.tolist()
input_data = input_data[numeric_cols]
return model_name.predict(input_data)
sample_data
predict_the_result(sample_data, train_model)
| 0.772445 | 0.946498 |
*Note: This is not yet ready, but shows the direction I'm leaning in for Fourth Edition Search.*
# State-Space Search
This notebook describes several state-space search algorithms, and how they can be used to solve a variety of problems. We start with a simple algorithm and a simple domain: finding a route from city to city. Later we will explore other algorithms and domains.
## The Route-Finding Domain
Like all state-space search problems, in a route-finding problem you will be given:
- A start state (for example, `'A'` for the city Arad).
- A goal state (for example, `'B'` for the city Bucharest).
- Actions that can change state (for example, driving from `'A'` to `'S'`).
You will be asked to find:
- A path from the start state, through intermediate states, to the goal state.
We'll use this map:
<img src="http://robotics.cs.tamu.edu/dshell/cs625/images/map.jpg" height="366" width="603">
A state-space search problem can be represented by a *graph*, where the vertexes of the graph are the states of the problem (in this case, cities) and the edges of the graph are the actions (in this case, driving along a road).
We'll represent a city by its single initial letter.
We'll represent the graph of connections as a `dict` that maps each city to a list of the neighboring cities (connected by a road). For now we don't explicitly represent the actions, nor the distances
between cities.
```
romania = {
'A': ['Z', 'T', 'S'],
'B': ['F', 'P', 'G', 'U'],
'C': ['D', 'R', 'P'],
'D': ['M', 'C'],
'E': ['H'],
'F': ['S', 'B'],
'G': ['B'],
'H': ['U', 'E'],
'I': ['N', 'V'],
'L': ['T', 'M'],
'M': ['L', 'D'],
'N': ['I'],
'O': ['Z', 'S'],
'P': ['R', 'C', 'B'],
'R': ['S', 'C', 'P'],
'S': ['A', 'O', 'F', 'R'],
'T': ['A', 'L'],
'U': ['B', 'V', 'H'],
'V': ['U', 'I'],
'Z': ['O', 'A']}
```
Suppose we want to get from `A` to `B`. Where can we go from the start state, `A`?
```
romania['A']
```
We see that from `A` we can get to any of the three cities `['Z', 'T', 'S']`. Which should we choose? *We don't know.* That's the whole point of *search*: we don't know which immediate action is best, so we'll have to explore, until we find a *path* that leads to the goal.
How do we explore? We'll start with a simple algorithm that will get us from `A` to `B`. We'll keep a *frontier*—a collection of not-yet-explored states—and expand the frontier outward until it reaches the goal. To be more precise:
- Initially, the only state in the frontier is the start state, `'A'`.
- Until we reach the goal, or run out of states in the frontier to explore, do the following:
- Remove the first state from the frontier. Call it `s`.
- If `s` is the goal, we're done. Return the path to `s`.
- Otherwise, consider all the neighboring states of `s`. For each one:
- If we have not previously explored the state, add it to the end of the frontier.
- Also keep track of the previous state that led to this new neighboring state; we'll need this to reconstruct the path to the goal, and to keep us from re-visiting previously explored states.
# A Simple Search Algorithm: `breadth_first`
The function `breadth_first` implements this strategy:
```
from collections import deque # Doubly-ended queue: pop from left, append to right.
def breadth_first(start, goal, neighbors):
"Find a shortest sequence of states from start to the goal."
frontier = deque([start]) # A queue of states
previous = {start: None} # start has no previous state; other states will
while frontier:
s = frontier.popleft()
if s == goal:
return path(previous, s)
for s2 in neighbors[s]:
if s2 not in previous:
frontier.append(s2)
previous[s2] = s
def path(previous, s):
"Return a list of states that lead to state s, according to the previous dict."
return [] if (s is None) else path(previous, previous[s]) + [s]
```
A couple of things to note:
1. We always add new states to the end of the frontier queue. That means that all the states that are adjacent to the start state will come first in the queue, then all the states that are two steps away, then three steps, etc.
That's what we mean by *breadth-first* search.
2. We recover the path to an `end` state by following the trail of `previous[end]` pointers, all the way back to `start`.
The dict `previous` is a map of `{state: previous_state}`.
3. When we finally get an `s` that is the goal state, we know we have found a shortest path, because any other state in the queue must correspond to a path that is as long or longer.
4. Note that `previous` contains all the states that are currently in `frontier` as well as all the states that were in `frontier` in the past.
5. If no path to the goal is found, then `breadth_first` returns `None`. If a path is found, it returns the sequence of states on the path.
Some examples:
```
breadth_first('A', 'B', romania)
breadth_first('L', 'N', romania)
breadth_first('N', 'L', romania)
breadth_first('E', 'E', romania)
```
Now let's try a different kind of problem that can be solved with the same search function.
## Word Ladders Problem
A *word ladder* problem is this: given a start word and a goal word, find the shortest way to transform the start word into the goal word by changing one letter at a time, such that each change results in a word. For example starting with `green` we can reach `grass` in 7 steps:
`green` → `greed` → `treed` → `trees` → `tress` → `cress` → `crass` → `grass`
We will need a dictionary of words. We'll use 5-letter words from the [Stanford GraphBase](http://www-cs-faculty.stanford.edu/~uno/sgb.html) project for this purpose. Let's get that file from the aima-data directory.
```
from search import *
sgb_words = DataFile("EN-text/sgb-words.txt")
```
We can assign `WORDS` to be the set of all the words in this file:
```
WORDS = set(sgb_words.read().split())
len(WORDS)
```
And define `neighboring_words` to return the set of all words that are a one-letter change away from a given `word`:
```
def neighboring_words(word):
"All words that are one letter away from this word."
neighbors = {word[:i] + c + word[i+1:]
for i in range(len(word))
for c in 'abcdefghijklmnopqrstuvwxyz'
if c != word[i]}
return neighbors & WORDS
```
For example:
```
neighboring_words('hello')
neighboring_words('world')
```
Now we can create `word_neighbors` as a dict of `{word: {neighboring_word, ...}}`:
```
word_neighbors = {word: neighboring_words(word)
for word in WORDS}
```
Now the `breadth_first` function can be used to solve a word ladder problem:
```
breadth_first('green', 'grass', word_neighbors)
breadth_first('smart', 'brain', word_neighbors)
breadth_first('frown', 'smile', word_neighbors)
```
# More General Search Algorithms
Now we'll embellish the `breadth_first` algorithm to make a family of search algorithms with more capabilities:
1. We distinguish between an *action* and the *result* of an action.
2. We allow different measures of the cost of a solution (not just the number of steps in the sequence).
3. We search through the state space in an order that is more likely to lead to an optimal solution quickly.
Here's how we do these things:
1. Instead of having a graph of neighboring states, we instead have an object of type *Problem*. A Problem
has one method, `Problem.actions(state)` to return a collection of the actions that are allowed in a state,
and another method, `Problem.result(state, action)` that says what happens when you take an action.
2. We keep a set, `explored` of states that have already been explored. We also have a class, `Frontier`, that makes it efficient to ask if a state is on the frontier.
3. Each action has a cost associated with it (in fact, the cost can vary with both the state and the action).
4. The `Frontier` class acts as a priority queue, allowing the "best" state to be explored next.
We represent a sequence of actions and resulting states as a linked list of `Node` objects.
The algorithm `breadth_first_search` is basically the same as `breadth_first`, but using our new conventions:
```
def breadth_first_search(problem):
"Search for goal; paths with least number of steps first."
if problem.is_goal(problem.initial):
return Node(problem.initial)
frontier = FrontierQ(Node(problem.initial), LIFO=False)
explored = set()
while frontier:
node = frontier.pop()
explored.add(node.state)
for action in problem.actions(node.state):
child = node.child(problem, action)
if child.state not in explored and child.state not in frontier:
if problem.is_goal(child.state):
return child
frontier.add(child)
```
Next is `uniform_cost_search`, in which each step can have a different cost, and we still expand first the state with the minimum path cost so far.
```
def uniform_cost_search(problem, costfn=lambda node: node.path_cost):
frontier = FrontierPQ(Node(problem.initial), costfn)
explored = set()
while frontier:
node = frontier.pop()
if problem.is_goal(node.state):
return node
explored.add(node.state)
for action in problem.actions(node.state):
child = node.child(problem, action)
if child.state not in explored and child not in frontier:
frontier.add(child)
elif child in frontier and frontier.cost[child] < child.path_cost:
frontier.replace(child)
```
Finally, `astar_search` in which the cost includes an estimate of the distance to the goal as well as the distance travelled so far.
```
def astar_search(problem, heuristic):
costfn = lambda node: node.path_cost + heuristic(node.state)
return uniform_cost_search(problem, costfn)
```
# Search Tree Nodes
The solution to a search problem is now a linked list of `Node`s, where each `Node`
includes a `state` and the `path_cost` of getting to the state. In addition, for every `Node` except for the first (root) `Node`, there is a previous `Node` (indicating the state that led to this `Node`) and an `action` (indicating the action taken to get here).
```
class Node(object):
"""A node in a search tree. A search tree is spanning tree over states.
A Node contains a state, the previous node in the tree, the action that
takes us from the previous state to this state, and the path cost to get to
this state. If a state is arrived at by two paths, then there are two nodes
with the same state."""
def __init__(self, state, previous=None, action=None, step_cost=1):
"Create a search tree Node, derived from a previous Node by an action."
self.state = state
self.previous = previous
self.action = action
self.path_cost = 0 if previous is None else (previous.path_cost + step_cost)
def __repr__(self): return "<Node {}: {}>".format(self.state, self.path_cost)
def __lt__(self, other): return self.path_cost < other.path_cost
def child(self, problem, action):
"The Node you get by taking an action from this Node."
result = problem.result(self.state, action)
return Node(result, self, action,
problem.step_cost(self.state, action, result))
```
# Frontiers
A frontier is a collection of Nodes that acts like both a Queue and a Set. A frontier, `f`, supports these operations:
* `f.add(node)`: Add a node to the Frontier.
* `f.pop()`: Remove and return the "best" node from the frontier.
* `f.replace(node)`: add this node and remove a previous node with the same state.
* `state in f`: Test if some node in the frontier has arrived at state.
* `f[state]`: returns the node corresponding to this state in frontier.
* `len(f)`: The number of Nodes in the frontier. When the frontier is empty, `f` is *false*.
We provide two kinds of frontiers: One for "regular" queues, either first-in-first-out (for breadth-first search) or last-in-first-out (for depth-first search), and one for priority queues, where you can specify what cost function on nodes you are trying to minimize.
```
from collections import OrderedDict
import heapq
class FrontierQ(OrderedDict):
"A Frontier that supports FIFO or LIFO Queue ordering."
def __init__(self, initial, LIFO=False):
"""Initialize Frontier with an initial Node.
If LIFO is True, pop from the end first; otherwise from front first."""
self.LIFO = LIFO
self.add(initial)
def add(self, node):
"Add a node to the frontier."
self[node.state] = node
def pop(self):
"Remove and return the next Node in the frontier."
(state, node) = self.popitem(self.LIFO)
return node
def replace(self, node):
"Make this node replace the nold node with the same state."
del self[node.state]
self.add(node)
class FrontierPQ:
"A Frontier ordered by a cost function; a Priority Queue."
def __init__(self, initial, costfn=lambda node: node.path_cost):
"Initialize Frontier with an initial Node, and specify a cost function."
self.heap = []
self.states = {}
self.costfn = costfn
self.add(initial)
def add(self, node):
"Add node to the frontier."
cost = self.costfn(node)
heapq.heappush(self.heap, (cost, node))
self.states[node.state] = node
def pop(self):
"Remove and return the Node with minimum cost."
(cost, node) = heapq.heappop(self.heap)
self.states.pop(node.state, None) # remove state
return node
def replace(self, node):
"Make this node replace a previous node with the same state."
if node.state not in self:
raise ValueError('{} not there to replace'.format(node.state))
for (i, (cost, old_node)) in enumerate(self.heap):
if old_node.state == node.state:
self.heap[i] = (self.costfn(node), node)
heapq._siftdown(self.heap, 0, i)
return
def __contains__(self, state): return state in self.states
def __len__(self): return len(self.heap)
```
# Search Problems
`Problem` is the abstract class for all search problems. You can define your own class of problems as a subclass of `Problem`. You will need to override the `actions` and `result` methods to describe how your problem works. You will also have to either override `is_goal` or pass a collection of goal states to the initialization method. If actions have different costs, you should override the `step_cost` method.
```
class Problem(object):
"""The abstract class for a search problem."""
def __init__(self, initial=None, goals=(), **additional_keywords):
"""Provide an initial state and optional goal states.
A subclass can have additional keyword arguments."""
self.initial = initial # The initial state of the problem.
self.goals = goals # A collection of possibe goal states.
self.__dict__.update(**additional_keywords)
def actions(self, state):
"Return a list of actions executable in this state."
raise NotImplementedError # Override this!
def result(self, state, action):
"The state that results from executing this action in this state."
raise NotImplementedError # Override this!
def is_goal(self, state):
"True if the state is a goal."
return state in self.goals # Optionally override this!
def step_cost(self, state, action, result=None):
"The cost of taking this action from this state."
return 1 # Override this if actions have different costs
def action_sequence(node):
"The sequence of actions to get to this node."
actions = []
while node.previous:
actions.append(node.action)
node = node.previous
return actions[::-1]
def state_sequence(node):
"The sequence of states to get to this node."
states = [node.state]
while node.previous:
node = node.previous
states.append(node.state)
return states[::-1]
```
# Two Location Vacuum World
```
dirt = '*'
clean = ' '
class TwoLocationVacuumProblem(Problem):
"""A Vacuum in a world with two locations, and dirt.
Each state is a tuple of (location, dirt_in_W, dirt_in_E)."""
def actions(self, state): return ('W', 'E', 'Suck')
def is_goal(self, state): return dirt not in state
def result(self, state, action):
"The state that results from executing this action in this state."
(loc, dirtW, dirtE) = state
if action == 'W': return ('W', dirtW, dirtE)
elif action == 'E': return ('E', dirtW, dirtE)
elif action == 'Suck' and loc == 'W': return (loc, clean, dirtE)
elif action == 'Suck' and loc == 'E': return (loc, dirtW, clean)
else: raise ValueError('unknown action: ' + action)
problem = TwoLocationVacuumProblem(initial=('W', dirt, dirt))
result = uniform_cost_search(problem)
result
action_sequence(result)
state_sequence(result)
problem = TwoLocationVacuumProblem(initial=('E', clean, dirt))
result = uniform_cost_search(problem)
action_sequence(result)
```
# Water Pouring Problem
Here is another problem domain, to show you how to define one. The idea is that we have a number of water jugs and a water tap and the goal is to measure out a specific amount of water (in, say, ounces or liters). You can completely fill or empty a jug, but because the jugs don't have markings on them, you can't partially fill them with a specific amount. You can, however, pour one jug into another, stopping when the second is full or the first is empty.
```
class PourProblem(Problem):
"""Problem about pouring water between jugs to achieve some water level.
Each state is a tuples of levels. In the initialization, provide a tuple of
capacities, e.g. PourProblem(capacities=(8, 16, 32), initial=(2, 4, 3), goals={7}),
which means three jugs of capacity 8, 16, 32, currently filled with 2, 4, 3 units of
water, respectively, and the goal is to get a level of 7 in any one of the jugs."""
def actions(self, state):
"""The actions executable in this state."""
jugs = range(len(state))
return ([('Fill', i) for i in jugs if state[i] != self.capacities[i]] +
[('Dump', i) for i in jugs if state[i] != 0] +
[('Pour', i, j) for i in jugs for j in jugs if i != j])
def result(self, state, action):
"""The state that results from executing this action in this state."""
result = list(state)
act, i, j = action[0], action[1], action[-1]
if act == 'Fill': # Fill i to capacity
result[i] = self.capacities[i]
elif act == 'Dump': # Empty i
result[i] = 0
elif act == 'Pour':
a, b = state[i], state[j]
result[i], result[j] = ((0, a + b)
if (a + b <= self.capacities[j]) else
(a + b - self.capacities[j], self.capacities[j]))
else:
raise ValueError('unknown action', action)
return tuple(result)
def is_goal(self, state):
"""True if any of the jugs has a level equal to one of the goal levels."""
return any(level in self.goals for level in state)
p7 = PourProblem(initial=(2, 0), capacities=(5, 13), goals={7})
p7.result((2, 0), ('Fill', 1))
result = uniform_cost_search(p7)
action_sequence(result)
```
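Since `astar_search` only changes the cost function, a quick sanity check (a sketch using the `p7` instance above) is that a zero heuristic reproduces the uniform-cost result:
```
# With a zero heuristic, A* degenerates to uniform-cost search,
# so both searches should find solutions of equal cost.
ucs_node = uniform_cost_search(p7)
astar_node = astar_search(p7, lambda state: 0)
assert astar_node.path_cost == ucs_node.path_cost
action_sequence(astar_node)
```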
# Visualization Output
```
def showpath(searcher, problem):
"Show what happens when searcvher solves problem."
problem = Instrumented(problem)
print('\n{}:'.format(searcher.__name__))
result = searcher(problem)
if result:
actions = action_sequence(result)
state = problem.initial
path_cost = 0
for steps, action in enumerate(actions, 1):
path_cost += problem.step_cost(state, action, 0)
result = problem.result(state, action)
print(' {} =={}==> {}; cost {} after {} steps'
.format(state, action, result, path_cost, steps,
'; GOAL!' if problem.is_goal(result) else ''))
state = result
msg = 'GOAL FOUND' if result else 'no solution'
print('{} after {} results and {} goal checks'
.format(msg, problem._counter['result'], problem._counter['is_goal']))
from collections import Counter
class Instrumented:
"Instrument an object to count all the attribute accesses in _counter."
def __init__(self, obj):
self._object = obj
self._counter = Counter()
def __getattr__(self, attr):
self._counter[attr] += 1
return getattr(self._object, attr)
showpath(uniform_cost_search, p7)
p = PourProblem(initial=(0, 0), capacities=(7, 13), goals={2})
showpath(uniform_cost_search, p)
class GreenPourProblem(PourProblem):
def step_cost(self, state, action, result=None):
"The cost is the amount of water used in a fill."
if action[0] == 'Fill':
i = action[1]
return self.capacities[i] - state[i]
return 0
p = GreenPourProblem(initial=(0, 0), capacities=(7, 13), goals={2})
showpath(uniform_cost_search, p)
def compare_searchers(problem, searchers=None):
"Apply each of the search algorithms to the problem, and show results"
if searchers is None:
searchers = (breadth_first_search, uniform_cost_search)
for searcher in searchers:
showpath(searcher, problem)
compare_searchers(p)
```
# Random Grid
An environment where you can move in any of 4 directions, unless there is an obstacle there.
```
import random
N, S, E, W = DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def Grid(width, height, obstacles=0.1):
"""A 2-D grid, width x height, with obstacles that are either a collection of points,
or a fraction between 0 and 1 indicating the density of obstacles, chosen at random."""
grid = {(x, y) for x in range(width) for y in range(height)}
if isinstance(obstacles, (float, int)):
obstacles = random.sample(grid, int(width * height * obstacles))
def neighbors(x, y):
for (dx, dy) in DIRECTIONS:
(nx, ny) = (x + dx, y + dy)
if (nx, ny) not in obstacles and 0 <= nx < width and 0 <= ny < height:
yield (nx, ny)
return {(x, y): list(neighbors(x, y))
for x in range(width) for y in range(height)}
Grid(5, 5)
class GridProblem(Problem):
"Create with a call like GridProblem(grid=Grid(10, 10), initial=(0, 0), goal=(9, 9))"
def actions(self, state): return DIRECTIONS
def result(self, state, action):
#print('ask for result of', state, action)
(x, y) = state
(dx, dy) = action
r = (x + dx, y + dy)
return r if r in self.grid[state] else state
gp = GridProblem(grid=Grid(5, 5, 0.3), initial=(0, 0), goals={(4, 4)})
showpath(uniform_cost_search, gp)
```
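Because grid states are `(x, y)` tuples, the Manhattan distance to the goal cell makes an easy heuristic for `astar_search`. The sketch below assumes the `gp` instance above, whose goal is `(4, 4)`; on a random grid there may be no path at all, in which case no solution is reported:
```
def manhattan_to_goal(state, goal=(4, 4)):
    "Manhattan (city-block) distance from a grid cell to the goal cell."
    (x, y), (gx, gy) = state, goal
    return abs(x - gx) + abs(y - gy)

def astar_grid_search(problem):
    "A* on the grid problem, using the Manhattan-distance heuristic."
    return astar_search(problem, manhattan_to_goal)

showpath(astar_grid_search, gp)
```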
# Finding a hard PourProblem
What solvable two-jug PourProblem requires the most steps? We can define the hardness as the number of steps, and then iterate over all PourProblems with capacities up to a maximum `C`, keeping the hardest one.
```
def hardness(problem):
L = breadth_first_search(problem)
#print('hardness', problem.initial, problem.capacities, problem.goals, L)
return len(action_sequence(L)) if (L is not None) else 0
hardness(p7)
action_sequence(breadth_first_search(p7))
C = 9 # Maximum capacity to consider
phard = max((PourProblem(initial=(a, b), capacities=(A, B), goals={goal})
for A in range(C+1) for B in range(C+1)
for a in range(A) for b in range(B)
for goal in range(max(A, B))),
key=hardness)
phard.initial, phard.capacities, phard.goals
showpath(breadth_first_search, PourProblem(initial=(0, 0), capacities=(7, 9), goals={8}))
showpath(uniform_cost_search, phard)
class GridProblem(Problem):
"""A Grid."""
def actions(self, state): return ['N', 'S', 'E', 'W']
def result(self, state, action):
"""The state that results from executing this action in this state."""
(W, H) = self.size
if action == 'N' and state > W: return state - W
if action == 'S' and state + W < W * W: return state + W
if action == 'E' and (state + 1) % W !=0: return state + 1
if action == 'W' and state % W != 0: return state - 1
return state
compare_searchers(GridProblem(initial=0, goals={44}, size=(10, 10)))
def test_frontier():
#### Breadth-first search with FIFO Q
f = FrontierQ(Node(1), LIFO=False)
assert 1 in f and len(f) == 1
f.add(Node(2))
f.add(Node(3))
assert 1 in f and 2 in f and 3 in f and len(f) == 3
assert f.pop().state == 1
assert 1 not in f and 2 in f and 3 in f and len(f) == 2
assert f
assert f.pop().state == 2
assert f.pop().state == 3
assert not f
#### Depth-first search with LIFO Q
f = FrontierQ(Node('a'), LIFO=True)
for s in 'bcdef': f.add(Node(s))
assert len(f) == 6 and 'a' in f and 'c' in f and 'f' in f
for s in 'fedcba': assert f.pop().state == s
assert not f
#### Best-first search with Priority Q
f = FrontierPQ(Node(''), lambda node: len(node.state))
assert '' in f and len(f) == 1 and f
for s in ['book', 'boo', 'bookie', 'bookies', 'cook', 'look', 'b']:
assert s not in f
f.add(Node(s))
assert s in f
assert f.pop().state == ''
assert f.pop().state == 'b'
assert f.pop().state == 'boo'
assert {f.pop().state for _ in '123'} == {'book', 'cook', 'look'}
assert f.pop().state == 'bookie'
#### Romania: Two paths to Bucharest; cheapest one found first
S = Node('S')
SF = Node('F', S, 'S->F', 99)
SFB = Node('B', SF, 'F->B', 211)
SR = Node('R', S, 'S->R', 80)
SRP = Node('P', SR, 'R->P', 97)
SRPB = Node('B', SRP, 'P->B', 101)
f = FrontierPQ(S)
f.add(SF); f.add(SR), f.add(SRP), f.add(SRPB); f.add(SFB)
def cs(n): return (n.path_cost, n.state) # cs: cost and state
assert cs(f.pop()) == (0, 'S')
assert cs(f.pop()) == (80, 'R')
assert cs(f.pop()) == (99, 'F')
assert cs(f.pop()) == (177, 'P')
assert cs(f.pop()) == (278, 'B')
return 'test_frontier ok'
test_frontier()
%matplotlib inline
import matplotlib.pyplot as plt
p = plt.plot([i**2 for i in range(10)])
plt.savefig('destination_path.eps', format='eps', dpi=1200)
import itertools
import random
# http://stackoverflow.com/questions/10194482/custom-matplotlib-plot-chess-board-like-table-with-colored-cells
from matplotlib.table import Table
def main():
grid_table(8, 8)
plt.axis('scaled')
plt.show()
def grid_table(nrows, ncols):
fig, ax = plt.subplots()
ax.set_axis_off()
colors = ['white', 'lightgrey', 'dimgrey']
tb = Table(ax, bbox=[0,0,2,2])
for i,j in itertools.product(range(ncols), range(nrows)):
tb.add_cell(i, j, 2./ncols, 2./nrows, text='{:0.2f}'.format(0.1234),
loc='center', facecolor=random.choice(colors), edgecolor='grey') # facecolors=
ax.add_table(tb)
#ax.plot([0, .3], [.2, .2])
#ax.add_line(plt.Line2D([0.3, 0.5], [0.7, 0.7], linewidth=2, color='blue'))
return fig
main()
import collections
class defaultkeydict(collections.defaultdict):
"""Like defaultdict, but the default_factory is a function of the key.
>>> d = defaultkeydict(abs); d[-42]
42
"""
def __missing__(self, key):
self[key] = self.default_factory(key)
return self[key]
```
|
github_jupyter
|
romania = {
'A': ['Z', 'T', 'S'],
'B': ['F', 'P', 'G', 'U'],
'C': ['D', 'R', 'P'],
'D': ['M', 'C'],
'E': ['H'],
'F': ['S', 'B'],
'G': ['B'],
'H': ['U', 'E'],
'I': ['N', 'V'],
'L': ['T', 'M'],
'M': ['L', 'D'],
'N': ['I'],
'O': ['Z', 'S'],
'P': ['R', 'C', 'B'],
'R': ['S', 'C', 'P'],
'S': ['A', 'O', 'F', 'R'],
'T': ['A', 'L'],
'U': ['B', 'V', 'H'],
'V': ['U', 'I'],
'Z': ['O', 'A']}
romania['A']
from collections import deque # Doubly-ended queue: pop from left, append to right.
def breadth_first(start, goal, neighbors):
"Find a shortest sequence of states from start to the goal."
frontier = deque([start]) # A queue of states
previous = {start: None} # start has no previous state; other states will
while frontier:
s = frontier.popleft()
if s == goal:
return path(previous, s)
for s2 in neighbors[s]:
if s2 not in previous:
frontier.append(s2)
previous[s2] = s
def path(previous, s):
"Return a list of states that lead to state s, according to the previous dict."
return [] if (s is None) else path(previous, previous[s]) + [s]
breadth_first('A', 'B', romania)
breadth_first('L', 'N', romania)
breadth_first('N', 'L', romania)
breadth_first('E', 'E', romania)
from search import *
sgb_words = DataFile("EN-text/sgb-words.txt")
WORDS = set(sgb_words.read().split())
len(WORDS)
def neighboring_words(word):
"All words that are one letter away from this word."
neighbors = {word[:i] + c + word[i+1:]
for i in range(len(word))
for c in 'abcdefghijklmnopqrstuvwxyz'
if c != word[i]}
return neighbors & WORDS
neighboring_words('hello')
neighboring_words('world')
word_neighbors = {word: neighboring_words(word)
for word in WORDS}
breadth_first('green', 'grass', word_neighbors)
breadth_first('smart', 'brain', word_neighbors)
breadth_first('frown', 'smile', word_neighbors)
def breadth_first_search(problem):
"Search for goal; paths with least number of steps first."
if problem.is_goal(problem.initial):
return Node(problem.initial)
frontier = FrontierQ(Node(problem.initial), LIFO=False)
explored = set()
while frontier:
node = frontier.pop()
explored.add(node.state)
for action in problem.actions(node.state):
child = node.child(problem, action)
if child.state not in explored and child.state not in frontier:
if problem.is_goal(child.state):
return child
frontier.add(child)
def uniform_cost_search(problem, costfn=lambda node: node.path_cost):
frontier = FrontierPQ(Node(problem.initial), costfn)
explored = set()
while frontier:
node = frontier.pop()
if problem.is_goal(node.state):
return node
explored.add(node.state)
for action in problem.actions(node.state):
child = node.child(problem, action)
if child.state not in explored and child not in frontier:
frontier.add(child)
elif child in frontier and frontier.cost[child] < child.path_cost:
frontier.replace(child)
def astar_search(problem, heuristic):
costfn = lambda node: node.path_cost + heuristic(node.state)
return uniform_cost_search(problem, costfn)
class Node(object):
"""A node in a search tree. A search tree is spanning tree over states.
A Node contains a state, the previous node in the tree, the action that
takes us from the previous state to this state, and the path cost to get to
this state. If a state is arrived at by two paths, then there are two nodes
with the same state."""
def __init__(self, state, previous=None, action=None, step_cost=1):
"Create a search tree Node, derived from a previous Node by an action."
self.state = state
self.previous = previous
self.action = action
self.path_cost = 0 if previous is None else (previous.path_cost + step_cost)
def __repr__(self): return "<Node {}: {}>".format(self.state, self.path_cost)
def __lt__(self, other): return self.path_cost < other.path_cost
def child(self, problem, action):
"The Node you get by taking an action from this Node."
result = problem.result(self.state, action)
return Node(result, self, action,
problem.step_cost(self.state, action, result))
from collections import OrderedDict
import heapq
class FrontierQ(OrderedDict):
"A Frontier that supports FIFO or LIFO Queue ordering."
def __init__(self, initial, LIFO=False):
"""Initialize Frontier with an initial Node.
If LIFO is True, pop from the end first; otherwise from front first."""
self.LIFO = LIFO
self.add(initial)
def add(self, node):
"Add a node to the frontier."
self[node.state] = node
def pop(self):
"Remove and return the next Node in the frontier."
(state, node) = self.popitem(self.LIFO)
return node
def replace(self, node):
"Make this node replace the nold node with the same state."
del self[node.state]
self.add(node)
class FrontierPQ:
"A Frontier ordered by a cost function; a Priority Queue."
def __init__(self, initial, costfn=lambda node: node.path_cost):
"Initialize Frontier with an initial Node, and specify a cost function."
self.heap = []
self.states = {}
self.costfn = costfn
self.add(initial)
def add(self, node):
"Add node to the frontier."
cost = self.costfn(node)
heapq.heappush(self.heap, (cost, node))
self.states[node.state] = node
def pop(self):
"Remove and return the Node with minimum cost."
(cost, node) = heapq.heappop(self.heap)
self.states.pop(node.state, None) # remove state
return node
def replace(self, node):
"Make this node replace a previous node with the same state."
if node.state not in self:
raise ValueError('{} not there to replace'.format(node.state))
for (i, (cost, old_node)) in enumerate(self.heap):
if old_node.state == node.state:
self.heap[i] = (self.costfn(node), node)
heapq._siftdown(self.heap, 0, i)
return
def __contains__(self, state): return state in self.states
def __len__(self): return len(self.heap)
class Problem(object):
"""The abstract class for a search problem."""
def __init__(self, initial=None, goals=(), **additional_keywords):
"""Provide an initial state and optional goal states.
A subclass can have additional keyword arguments."""
self.initial = initial # The initial state of the problem.
self.goals = goals # A collection of possibe goal states.
self.__dict__.update(**additional_keywords)
def actions(self, state):
"Return a list of actions executable in this state."
raise NotImplementedError # Override this!
def result(self, state, action):
"The state that results from executing this action in this state."
raise NotImplementedError # Override this!
def is_goal(self, state):
"True if the state is a goal."
return state in self.goals # Optionally override this!
def step_cost(self, state, action, result=None):
"The cost of taking this action from this state."
return 1 # Override this if actions have different costs
def action_sequence(node):
"The sequence of actions to get to this node."
actions = []
while node.previous:
actions.append(node.action)
node = node.previous
return actions[::-1]
def state_sequence(node):
"The sequence of states to get to this node."
states = [node.state]
while node.previous:
node = node.previous
states.append(node.state)
return states[::-1]
dirt = '*'
clean = ' '
class TwoLocationVacuumProblem(Problem):
"""A Vacuum in a world with two locations, and dirt.
Each state is a tuple of (location, dirt_in_W, dirt_in_E)."""
def actions(self, state): return ('W', 'E', 'Suck')
def is_goal(self, state): return dirt not in state
def result(self, state, action):
"The state that results from executing this action in this state."
(loc, dirtW, dirtE) = state
if action == 'W': return ('W', dirtW, dirtE)
elif action == 'E': return ('E', dirtW, dirtE)
elif action == 'Suck' and loc == 'W': return (loc, clean, dirtE)
elif action == 'Suck' and loc == 'E': return (loc, dirtW, clean)
else: raise ValueError('unknown action: ' + action)
problem = TwoLocationVacuumProblem(initial=('W', dirt, dirt))
result = uniform_cost_search(problem)
result
action_sequence(result)
state_sequence(result)
problem = TwoLocationVacuumProblem(initial=('E', clean, dirt))
result = uniform_cost_search(problem)
action_sequence(result)
class PourProblem(Problem):
"""Problem about pouring water between jugs to achieve some water level.
Each state is a tuples of levels. In the initialization, provide a tuple of
capacities, e.g. PourProblem(capacities=(8, 16, 32), initial=(2, 4, 3), goals={7}),
which means three jugs of capacity 8, 16, 32, currently filled with 2, 4, 3 units of
water, respectively, and the goal is to get a level of 7 in any one of the jugs."""
def actions(self, state):
"""The actions executable in this state."""
jugs = range(len(state))
return ([('Fill', i) for i in jugs if state[i] != self.capacities[i]] +
[('Dump', i) for i in jugs if state[i] != 0] +
[('Pour', i, j) for i in jugs for j in jugs if i != j])
def result(self, state, action):
"""The state that results from executing this action in this state."""
result = list(state)
act, i, j = action[0], action[1], action[-1]
if act == 'Fill': # Fill i to capacity
result[i] = self.capacities[i]
elif act == 'Dump': # Empty i
result[i] = 0
elif act == 'Pour':
a, b = state[i], state[j]
result[i], result[j] = ((0, a + b)
if (a + b <= self.capacities[j]) else
(a + b - self.capacities[j], self.capacities[j]))
else:
raise ValueError('unknown action', action)
return tuple(result)
def is_goal(self, state):
"""True if any of the jugs has a level equal to one of the goal levels."""
return any(level in self.goals for level in state)
p7 = PourProblem(initial=(2, 0), capacities=(5, 13), goals={7})
p7.result((2, 0), ('Fill', 1))
result = uniform_cost_search(p7)
action_sequence(result)
def showpath(searcher, problem):
"Show what happens when searcvher solves problem."
problem = Instrumented(problem)
print('\n{}:'.format(searcher.__name__))
result = searcher(problem)
if result:
actions = action_sequence(result)
state = problem.initial
path_cost = 0
for steps, action in enumerate(actions, 1):
path_cost += problem.step_cost(state, action, 0)
result = problem.result(state, action)
print(' {} =={}==> {}; cost {} after {} steps'
.format(state, action, result, path_cost, steps,
'; GOAL!' if problem.is_goal(result) else ''))
state = result
msg = 'GOAL FOUND' if result else 'no solution'
print('{} after {} results and {} goal checks'
.format(msg, problem._counter['result'], problem._counter['is_goal']))
from collections import Counter
class Instrumented:
"Instrument an object to count all the attribute accesses in _counter."
def __init__(self, obj):
self._object = obj
self._counter = Counter()
def __getattr__(self, attr):
self._counter[attr] += 1
return getattr(self._object, attr)
showpath(uniform_cost_search, p7)
p = PourProblem(initial=(0, 0), capacities=(7, 13), goals={2})
showpath(uniform_cost_search, p)
class GreenPourProblem(PourProblem):
def step_cost(self, state, action, result=None):
"The cost is the amount of water used in a fill."
if action[0] == 'Fill':
i = action[1]
return self.capacities[i] - state[i]
return 0
p = GreenPourProblem(initial=(0, 0), capacities=(7, 13), goals={2})
showpath(uniform_cost_search, p)
def compare_searchers(problem, searchers=None):
"Apply each of the search algorithms to the problem, and show results"
if searchers is None:
searchers = (breadth_first_search, uniform_cost_search)
for searcher in searchers:
showpath(searcher, problem)
compare_searchers(p)
import random
N, S, E, W = DIRECTIONS = [(0, 1), (0, -1), (1, 0), (-1, 0)]
def Grid(width, height, obstacles=0.1):
"""A 2-D grid, width x height, with obstacles that are either a collection of points,
or a fraction between 0 and 1 indicating the density of obstacles, chosen at random."""
grid = {(x, y) for x in range(width) for y in range(height)}
if isinstance(obstacles, (float, int)):
obstacles = random.sample(grid, int(width * height * obstacles))
def neighbors(x, y):
for (dx, dy) in DIRECTIONS:
(nx, ny) = (x + dx, y + dy)
if (nx, ny) not in obstacles and 0 <= nx < width and 0 <= ny < height:
yield (nx, ny)
return {(x, y): list(neighbors(x, y))
for x in range(width) for y in range(height)}
Grid(5, 5)
class GridProblem(Problem):
"Create with a call like GridProblem(grid=Grid(10, 10), initial=(0, 0), goal=(9, 9))"
def actions(self, state): return DIRECTIONS
def result(self, state, action):
#print('ask for result of', state, action)
(x, y) = state
(dx, dy) = action
r = (x + dx, y + dy)
return r if r in self.grid[state] else state
gp = GridProblem(grid=Grid(5, 5, 0.3), initial=(0, 0), goals={(4, 4)})
showpath(uniform_cost_search, gp)
def hardness(problem):
L = breadth_first_search(problem)
#print('hardness', problem.initial, problem.capacities, problem.goals, L)
return len(action_sequence(L)) if (L is not None) else 0
hardness(p7)
action_sequence(breadth_first_search(p7))
C = 9 # Maximum capacity to consider
phard = max((PourProblem(initial=(a, b), capacities=(A, B), goals={goal})
for A in range(C+1) for B in range(C+1)
for a in range(A) for b in range(B)
for goal in range(max(A, B))),
key=hardness)
phard.initial, phard.capacities, phard.goals
showpath(breadth_first_search, PourProblem(initial=(0, 0), capacities=(7, 9), goals={8}))
showpath(uniform_cost_search, phard)
class GridProblem(Problem):
"""A Grid."""
def actions(self, state): return ['N', 'S', 'E', 'W']
def result(self, state, action):
"""The state that results from executing this action in this state."""
(W, H) = self.size
if action == 'N' and state > W: return state - W
if action == 'S' and state + W < W * W: return state + W
if action == 'E' and (state + 1) % W !=0: return state + 1
if action == 'W' and state % W != 0: return state - 1
return state
compare_searchers(GridProblem(initial=0, goals={44}, size=(10, 10)))
def test_frontier():
#### Breadth-first search with FIFO Q
f = FrontierQ(Node(1), LIFO=False)
assert 1 in f and len(f) == 1
f.add(Node(2))
f.add(Node(3))
assert 1 in f and 2 in f and 3 in f and len(f) == 3
assert f.pop().state == 1
assert 1 not in f and 2 in f and 3 in f and len(f) == 2
assert f
assert f.pop().state == 2
assert f.pop().state == 3
assert not f
#### Depth-first search with LIFO Q
f = FrontierQ(Node('a'), LIFO=True)
for s in 'bcdef': f.add(Node(s))
assert len(f) == 6 and 'a' in f and 'c' in f and 'f' in f
for s in 'fedcba': assert f.pop().state == s
assert not f
#### Best-first search with Priority Q
f = FrontierPQ(Node(''), lambda node: len(node.state))
assert '' in f and len(f) == 1 and f
for s in ['book', 'boo', 'bookie', 'bookies', 'cook', 'look', 'b']:
assert s not in f
f.add(Node(s))
assert s in f
assert f.pop().state == ''
assert f.pop().state == 'b'
assert f.pop().state == 'boo'
assert {f.pop().state for _ in '123'} == {'book', 'cook', 'look'}
assert f.pop().state == 'bookie'
#### Romania: Two paths to Bucharest; cheapest one found first
S = Node('S')
SF = Node('F', S, 'S->F', 99)
SFB = Node('B', SF, 'F->B', 211)
SR = Node('R', S, 'S->R', 80)
SRP = Node('P', SR, 'R->P', 97)
SRPB = Node('B', SRP, 'P->B', 101)
f = FrontierPQ(S)
f.add(SF); f.add(SR), f.add(SRP), f.add(SRPB); f.add(SFB)
def cs(n): return (n.path_cost, n.state) # cs: cost and state
assert cs(f.pop()) == (0, 'S')
assert cs(f.pop()) == (80, 'R')
assert cs(f.pop()) == (99, 'F')
assert cs(f.pop()) == (177, 'P')
assert cs(f.pop()) == (278, 'B')
return 'test_frontier ok'
test_frontier()
%matplotlib inline
import matplotlib.pyplot as plt
p = plt.plot([i**2 for i in range(10)])
plt.savefig('destination_path.eps', format='eps', dpi=1200)
import itertools
import random
# http://stackoverflow.com/questions/10194482/custom-matplotlib-plot-chess-board-like-table-with-colored-cells
from matplotlib.table import Table
def main():
grid_table(8, 8)
plt.axis('scaled')
plt.show()
def grid_table(nrows, ncols):
fig, ax = plt.subplots()
ax.set_axis_off()
colors = ['white', 'lightgrey', 'dimgrey']
tb = Table(ax, bbox=[0,0,2,2])
for i,j in itertools.product(range(ncols), range(nrows)):
tb.add_cell(i, j, 2./ncols, 2./nrows, text='{:0.2f}'.format(0.1234),
loc='center', facecolor=random.choice(colors), edgecolor='grey') # facecolors=
ax.add_table(tb)
#ax.plot([0, .3], [.2, .2])
#ax.add_line(plt.Line2D([0.3, 0.5], [0.7, 0.7], linewidth=2, color='blue'))
return fig
main()
import collections
class defaultkeydict(collections.defaultdict):
"""Like defaultdict, but the default_factory is a function of the key.
>>> d = defaultkeydict(abs); d[-42]
42
"""
def __missing__(self, key):
self[key] = self.default_factory(key)
return self[key]
| 0.852383 | 0.989161 |
## ERA5 data on the Planetary Computer
The [ERA5 dataset](https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation) is a global weather and climate reanalysis from ECMWF; the copy on the Planetary Computer was converted to Zarr by [PlanetOS](https://planetos.com/).
### Query using the STAC API
The data assets are a collection of Zarr groups, which can be opened with libraries like [xarray](https://xarray.pydata.org/).
Each STAC item covers a single month and the entire globe. You'll likely query on the `datetime`.
```
import pystac_client
catalog = pystac_client.Client.open(
"https://pct-apis-staging.westeurope.cloudapp.azure.com/stac/"
)
search = catalog.search(collections=["era5-pds"], datetime="1980-01")
items = search.get_all_items()
len(items)
list(items)
```
There are two items per month, one for each kind of variable they contain: `fc` ("forecast") and `an` ("analysis").
* An **analysis**, of the atmospheric conditions, is a blend of observations
with a previous forecast. An analysis can only provide
[instantaneous](https://confluence.ecmwf.int/display/CKB/Model+grid+box+and+time+step)
parameters (parameters valid at a specific time, e.g. temperature at 12:00),
but not accumulated parameters, mean rates or min/max parameters.
* A **forecast** starts with an analysis at a specific time (the 'initialization
time'), and a model computes the atmospheric conditions for a number of
'forecast steps', at increasing 'validity times', into the future. A forecast
can provide
[instantaneous](https://confluence.ecmwf.int/display/CKB/Model+grid+box+and+time+step)
parameters, accumulated parameters, mean rates, and min/max parameters.
You can narrow your search to a specific kind using the `era5:kind` property.
```
import pystac_client
catalog = pystac_client.Client.open(
"https://pct-apis-staging.westeurope.cloudapp.azure.com/stac/"
)
search = catalog.search(
collections=["era5-pds"], datetime="1980-01", query={"era5:kind": {"eq": "an"}}
)
items = search.get_all_items()
print(len(items))
item = items[0]
```
There are several assets available, one for each data variable. We can build up a dataset with all the variables using `xarray.open_dataset` and `combine_by_coords`.
```
import planetary_computer
import xarray as xr
signed_item = planetary_computer.sign(item)
datasets = [
xr.open_dataset(asset.href, **asset.extra_fields["xarray:open_kwargs"])
for asset in signed_item.assets.values()
]
ds = xr.combine_by_coords(datasets, join="exact")
ds
```
Now we can plot timeseries for the month, averaged over space:
```
ds["sea_surface_temperature"].mean(dim=["lon", "lat"]).plot();
```
Or make a map of some variable on a single date.
```
ds["sea_surface_temperature"].isel(time=0).plot(size=8);
```
Deep Learning
=============
Assignment 1
------------
The objective of this assignment is to learn about simple data curation practices, and familiarize you with some of the data we'll be reusing later.
This notebook uses the [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) dataset for the Python experiments that follow. This dataset is designed to look like the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset, while looking a little more like real data: it's a harder task, and the data is a lot less 'clean' than MNIST.
```
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
```
First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k labeled examples and the test set about 19,000. Given these sizes, it should be possible to train models quickly on any machine.
```
url = 'https://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
data_root = '.' # Change me to store data elsewhere
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 5% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
dest_filename = os.path.join(data_root, filename)
if force or not os.path.exists(dest_filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, dest_filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
```
Extract the dataset from the compressed .tar.gz file.
This should give you a set of directories, labeled A through J.
```
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
```
---
Problem 1
---------
Let's take a peek at some of the data to make sure it looks sensible. Each exemplar should be an image of a character A through J rendered in a different font. Display a sample of the images that we just downloaded. Hint: you can use the package IPython.display.
---
```
import random
def print_letter(data_folders, num_images=1):
"""Display a random sample of size NUM_IMAGES for each alphabet from A to J"""
for folder in data_folders:
print('Images in {}'.format(folder))
image_files = os.listdir(folder)
for image_sample in random.sample(image_files, num_images):
image_data = os.path.join(folder, image_sample)
display(Image(image_data))
print_letter(train_folders, num_images=1)
```
Now let's load the data in a more manageable format. Since, depending on your computer setup, you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk, and curate them independently. Later we'll merge them into a single dataset of manageable size.
We'll convert the entire dataset into a 3D array (image index, x, y) of floating point values, normalized to have approximately zero mean and standard deviation ~0.5 to make training easier down the road.
A few images might not be readable; we'll just skip them.
```
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (imageio.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except (IOError, ValueError) as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
```
---
Problem 2
---------
Let's verify that the data still looks good. Display a sample of the labels and images from the ndarray. Hint: you can use matplotlib.pyplot.
---
```
train_datasets
import matplotlib
import re
def view_pickled_image(pickle_files, num_images=1):
    """Display NUM_IMAGES random images from each pickled letter dataset."""
    for pickle_file in pickle_files:
        try:
            with open(pickle_file, 'rb') as f:
                letter_set = pickle.load(f)
            sample_images = random.sample(list(letter_set), num_images)
            fig = plt.figure()
            plt.title(re.findall(r'(\w)\.pickle', pickle_file)[0])
            for image in sample_images:
                plt.imshow(image, cmap=matplotlib.cm.binary)
                plt.show()
        except Exception as e:
            print('Unable to process data from', pickle_file, ':', e)
            raise
view_pickled_image(train_datasets)
```
---
Problem 3
---------
Another check: we expect the data to be balanced across classes. Verify that.
---
```
def num_images_per_class(pickle_files):
for pickle_file in pickle_files:
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
print("Number of images in file {} : {}".format(pickle_file, len(letter_set)))
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
num_images_per_class(train_datasets)
```
Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune `train_size` as needed. The labels will be stored into a separate array of integers 0 through 9.
Also create a validation dataset for hyperparameter tuning.
```
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
```
Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
```
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
```
---
Problem 4
---------
Convince yourself that the data is still good after shuffling!
---
```
legend = {i: chr(i + 65) for i in range(10)}
def disp_shuffled_samples(dataset, labels):
    """Show ten random images, each titled with the letter its label maps to."""
    sample_indices = random.sample(range(len(dataset)), 10)
    for i, idx in enumerate(sample_indices):
        plt.subplot(2, 5, i + 1)
        plt.axis('off')
        plt.title(legend[labels[idx]])
        plt.imshow(dataset[idx])
disp_shuffled_samples(train_dataset, train_labels)
```
Finally, let's save the data for later reuse:
```
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
```
---
Problem 5
---------
By construction, this dataset might contain a lot of overlapping samples, including training data that's also contained in the validation and test set! Overlap between training and test can skew the results if you expect to use your model in an environment where there is never an overlap, but it is actually ok if you expect to see training samples recur when you use it.
Measure how much overlap there is between training, validation and test samples.
Optional questions:
- What about near duplicates between datasets? (images that are almost identical; one possible approach is sketched after the solution code below)
- Create a sanitized validation and test set, and compare your accuracy on those in subsequent assignments.
---
```
import hashlib
def compare_datasets(dataset_1, dataset_2, labels_1):
hash_1 = np.array([hash(a_2d.data.tobytes()) for a_2d in dataset_1])
hash_2 = np.array([hash(a_2d.data.tobytes()) for a_2d in dataset_2])
duplicates = []
for i, image in enumerate(hash_1):
if (np.where(hash_2 == image)[0]).any():
duplicates.append(i)
return np.delete(dataset_1, duplicates, 0), np.delete(labels_1, duplicates, 0)
cleaned_test_dataset, cleaned_test_labels = compare_datasets(test_dataset, train_dataset, test_labels)
print("Number of overlapping images removed from test dataset: ", len(test_dataset) - len(cleaned_test_dataset))
```
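The optional near-duplicate question can be handled with the same hashing idea. The sketch below is one possible approach, not part of the original assignment: it rounds pixel values before hashing, so that images differing only by tiny numerical noise collide, and it also produces a sanitized validation set. The helper name `near_duplicate_clean`, the `decimals` setting, and the `sanitized_*` variable names are all illustrative choices.
```
import numpy as np

def near_duplicate_clean(dataset_1, dataset_2, labels_1, decimals=1):
    """Remove from dataset_1 any image whose coarsened pixels match an image in dataset_2."""
    def coarse_hash(img):
        # rounding makes the hash tolerant of tiny pixel-level differences
        return hash(np.round(img, decimals).tobytes())
    seen = {coarse_hash(img) for img in dataset_2}
    keep = [i for i, img in enumerate(dataset_1) if coarse_hash(img) not in seen]
    return dataset_1[keep], labels_1[keep]

sanitized_valid_dataset, sanitized_valid_labels = near_duplicate_clean(
    valid_dataset, train_dataset, valid_labels)
sanitized_test_dataset, sanitized_test_labels = near_duplicate_clean(
    test_dataset, train_dataset, test_labels)
print('Near-duplicates removed from validation set:',
      len(valid_dataset) - len(sanitized_valid_dataset))
print('Near-duplicates removed from test set:',
      len(test_dataset) - len(sanitized_test_dataset))
```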
---
Problem 6
---------
Let's get an idea of what an off-the-shelf classifier can give you on this data. It's always good to check that there is something to learn, and that it's a problem that is not so trivial that a canned solution solves it.
Train a simple model on this data using 50, 100, 1000 and 5000 training samples. Hint: you can use the LogisticRegression model from sklearn.linear_model.
Optional question: train an off-the-shelf model on all the data!
---
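One possible solution sketch for Problem 6, not part of the original notebook: flatten each image into a 784-dimensional vector and fit scikit-learn's `LogisticRegression` (already imported at the top of this notebook) on increasing numbers of training samples, scoring each model on the held-out test set. The helper name `train_and_score` and the `max_iter` setting are illustrative choices.
```
def train_and_score(n_samples):
    """Fit logistic regression on the first n_samples training images and return test accuracy."""
    X_train = train_dataset[:n_samples].reshape(n_samples, -1)
    y_train = train_labels[:n_samples]
    X_test = test_dataset.reshape(len(test_dataset), -1)
    clf = LogisticRegression(max_iter=1000)
    clf.fit(X_train, y_train)
    return clf.score(X_test, test_labels)

for n in (50, 100, 1000, 5000):
    print('{:5d} training samples -> test accuracy {:.3f}'.format(n, train_and_score(n)))
```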
## Reproduce DARTFISH results with Starfish
DARTFISH is a multiplexed image-based transcriptomics assay from the [Zhang lab](http://genome-tech.ucsd.edu/ZhangLab/). As of this writing, this assay is not published yet. Nevertheless, here we demonstrate that Starfish can be used to process the data from raw images into spatially resolved gene expression profiles.
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from starfish import display
from starfish import data, FieldOfView
from starfish.types import Features, Axes
from starfish import IntensityTable
from starfish.image import Filter
from starfish.spots import DetectPixels
```
## Load data into Starfish from the Cloud
The example data here correspond to DARTFISHv1 2017. The group is actively working on improving the protocol. The data represent human brain tissue from the human occipital cortex, from one field of view (FOV) of a larger experiment. The data from one field of view correspond to 18 images: 6 imaging rounds (r), 3 color channels (c), and 1 z-plane (z). Each image is 988x988 (y, x).
```
use_test_data = os.getenv("USE_TEST_DATA") is not None
experiment = data.DARTFISH(use_test_data=use_test_data)
imgs = experiment.fov().get_image(FieldOfView.PRIMARY_IMAGES)
print(imgs)
```
## Visualize codebook
The DARTFISH codebook maps pixel intensities across the rounds and channels to the corresponding barcodes and genes that those pixels code for. For this example dataset, the codebook specifies 96 possible barcodes. The codebook used in this experiment has 3 color channels and one blank channel, each of which contributes to codes. The presence of the blank channel will be important later when the filtering is described.
```
experiment.codebook
```
## Visualize raw data
A nice way to page through all this data is to use the display command. We have commented this out for now, because it will not render on GitHub. Instead, we simply show an image from the first round and color channel.
```
# %gui qt5
# display(stack)
single_plane = imgs.sel({Axes.ROUND: 0, Axes.CH: 0, Axes.ZPLANE: 0})
single_plane = single_plane.xarray.squeeze()
plt.figure(figsize=(7,7))
plt.imshow(single_plane, cmap='gray', clim=list(np.percentile(single_plane, [1, 99.9])))
plt.title('Round: 0, Channel: 0')
plt.axis('off');
```
## Filter and scale raw data before decoding into spatially resolved gene expression
First, we equalize the intensity of the images by scaling each image by its maximum intensity, which is equivalent to scaling by the 100th percentile value of the pixel values in each image.
```
sc_filt = Filter.Clip(p_max=100, expand_dynamic_range=True)
norm_imgs = sc_filt.run(imgs)
```
Next, for each imaging round and each pixel location, we zero the intensity values across all three color channels if the magnitude of this 3-vector is below a threshold. As such, the code value associated with these pixels will be the blank code. This is necessary to support euclidean decoding for codebooks that include blank values.
```
z_filt = Filter.ZeroByChannelMagnitude(thresh=.05, normalize=False)
filtered_imgs = z_filt.run(norm_imgs)
```
## Decode the processed data into spatially resolved gene expression profiles
Here, starfish decodes each pixel vector, across all rounds and channels, into the target (gene) it corresponds to. Contiguous pixels that map to the same target gene are called as one RNA molecule. Intuitively, pixel vectors are matched to the codebook by computing the euclidean distance between the pixel vector and all codewords. The gene target with the minimal distance is then selected, provided it is within `distance_threshold` of any code.
This decoding operation requires some parameter tuning, which is described below. First, we look at a distribution of pixel vector barcode magnitudes to determine the minimum magnitude threshold at which we will attempt to decode the pixel vector.
```
def compute_magnitudes(stack, norm_order=2):
pixel_intensities = IntensityTable.from_image_stack(stack)
feature_traces = pixel_intensities.stack(traces=(Axes.CH.value, Axes.ROUND.value))
norm = np.linalg.norm(feature_traces.values, ord=norm_order, axis=1)
return norm
mags = compute_magnitudes(filtered_imgs)
plt.hist(mags, bins=20);
sns.despine(offset=3)
plt.xlabel('Barcode magnitude')
plt.ylabel('Number of pixels')
plt.yscale('log');
```
Next, we decode the data.
```
# how much magnitude should a barcode have for it to be considered by decoding? this was set by looking at
# the plot above
magnitude_threshold = 0.5
# how big do we expect our spots to be, min/max size. this was set to be equivalent to the parameters
# determined by the Zhang lab.
area_threshold = (5, 30)
# how close, in euclidean space, should the pixel barcode be to the nearest barcode it was called to?
# here, I set this to be a large number, so I can inspect the distribution of decoded distances below
distance_threshold = 3
psd = DetectPixels.PixelSpotDecoder(
codebook=experiment.codebook,
metric='euclidean',
distance_threshold=distance_threshold,
magnitude_threshold=magnitude_threshold,
min_area=area_threshold[0],
max_area=area_threshold[1]
)
initial_spot_intensities, results = psd.run(filtered_imgs)
spots_df = initial_spot_intensities.to_features_dataframe()
spots_df['area'] = np.pi*spots_df['radius']**2
spots_df = spots_df.loc[spots_df[Features.PASSES_THRESHOLDS]]
spots_df.head()
```
## Compare to benchmark results
The below plot aggregates gene copy number across cells in the field of view and compares the results to the same copy numbers from the authors' pipeline. This can likely be improved by tweaking parameters in the above algorithms.
```
# load results from authors' pipeline
cnts_benchmark = pd.read_csv('https://d2nhj9g34unfro.cloudfront.net/20181005/DARTFISH/fov_001/counts.csv')
cnts_benchmark.head()
# select spots with distance less than a threshold, and count the number of each target gene
min_dist = 0.6
cnts_starfish = spots_df[spots_df.distance<=min_dist].groupby('target').count()['area']
cnts_starfish = cnts_starfish.reset_index(level=0)
cnts_starfish.rename(columns = {'target':'gene', 'area':'cnt_starfish'}, inplace=True)
benchmark_comparison = pd.merge(cnts_benchmark, cnts_starfish, on='gene', how='left')
benchmark_comparison.head(20)
x = benchmark_comparison.dropna().cnt.values
y = benchmark_comparison.dropna().cnt_starfish.values
r = np.corrcoef(x, y)
r = r[0,1]
plt.scatter(x, y, 50,zorder=2)
plt.xlabel('Gene copy number Benchmark')
plt.ylabel('Gene copy number Starfish')
plt.title('r = {}'.format(r))
sns.despine(offset=2)
```
## Visualize results
This image applies a pseudo-color to each gene channel to visualize the position and size of all called spots in the test image.
```
# exclude spots that don't meet our area thresholds
area_lookup = lambda x: 0 if x == 0 else results.region_properties[x - 1].area
vfunc = np.vectorize(area_lookup)
mask = np.squeeze(vfunc(results.label_image))
new_image = np.squeeze(results.decoded_image)*(mask > area_threshold[0])*(mask < area_threshold[1])
plt.figure(figsize=(10,10))
plt.imshow(new_image, cmap = 'nipy_spectral');
plt.axis('off');
plt.title('Coded rolonies');
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
rect = [Rectangle((100, 600), width=200, height=200)]
pc = PatchCollection(rect, facecolor='none', alpha=1.0, edgecolor='w', linewidth=1.5)
plt.gca().add_collection(pc)
plt.figure(figsize=(10,10))
plt.imshow(new_image[600:800, 100:300], cmap = 'nipy_spectral');
plt.axis('off');
plt.title('Coded rolonies, zoomed in');
```
## Parameter and QC analysis
Here, we further investigate reasonable choices for each of the parameters used by the PixelSpotDecoder. By tuning these parameters, one can achieve different results.
```
plt.figure(figsize=(10,3))
plt.subplot(131)
plt.hist(mags, bins=100);
plt.yscale('log')
plt.xlabel('barcode magnitude')
plt.ylabel('number of pixels')
sns.despine(offset=2)
plt.vlines(magnitude_threshold, ymin=plt.gca().get_ylim()[0], ymax=plt.gca().get_ylim()[1])
plt.title('Set magnitude threshold')
plt.subplot(132)
spots_df['area'] = np.pi*spots_df.radius**2
spots_df.area.hist(bins=30);
plt.xlabel('area')
plt.ylabel('number of spots')
sns.despine(offset=2)
plt.title('Set area threshold')
plt.subplot(133)
spots_df.distance.hist(bins=30)
plt.xlabel('min distance to code');
plt.vlines(min_dist, ymin=plt.gca().get_ylim()[0], ymax=plt.gca().get_ylim()[1])
sns.despine(offset=2)
plt.title('Set minimum distance threshold');
distance_threshold = min_dist
psd = DetectPixels.PixelSpotDecoder(
codebook=experiment.codebook,
metric='euclidean',
distance_threshold=distance_threshold,
magnitude_threshold=magnitude_threshold,
min_area=area_threshold[0],
max_area=area_threshold[1]
)
spot_intensities, results = psd.run(filtered_imgs)
spot_intensities = IntensityTable(spot_intensities.where(spot_intensities[Features.PASSES_THRESHOLDS], drop=True))
```
Here, we:
1. Pick a rolony that was successfully decoded to a gene.
2. Pull out the average pixel trace for that rolony.
3. Plot that pixel trace against the barcode of that gene.
This lets us assess, visually, how closely decoded barcodes match their targets.
```
# reshape the spot intensity table into a RxC barcode vector
pixel_traces = spot_intensities.stack(traces=(Axes.ROUND.value, Axes.CH.value))
# extract dataframe from spot intensity table for indexing purposes
pixel_traces_df = pixel_traces.to_features_dataframe()
pixel_traces_df['area'] = np.pi*pixel_traces_df.radius**2
# pick index of a barcode that was read and decoded from the ImageStack
ind = 4
# get the the corresponding gene this barcode was decoded to
gene = pixel_traces_df.loc[ind].target
# query the codebook for the actual barcode corresponding to this gene
real_barcode = experiment.codebook[experiment.codebook.target==gene].stack(traces=(Axes.ROUND.value, Axes.CH.value)).values[0]
read_out_barcode = pixel_traces[ind,:]
plt.plot(real_barcode, 'ok')
plt.stem(read_out_barcode)
sns.despine(offset=2)
plt.xticks(range(18))
plt.title(gene)
plt.xlabel('Index into R (0:5) and C(0:2)');
```
# TCLab Overview
The `tclab` package provides a set of Python tools for interfacing with the [BYU Temperature Control Laboratory](http://apmonitor.com/pdc/index.php/Main/ArduinoTemperatureControl). The Temperature Control Laboratory consists of two heaters and two temperature sensors mounted on an Arduino microcontroller board. Together, the `tclab` package and the Temperature Control Laboratory provide a low-cost experimental platform for implementing algorithms commonly used for process control.

## TCLab Architecture
The `tclab` package is intended to be used as a teaching tool. The package provides high-level access to sensors, heaters, and a pseudo-realtime clock. It includes the following Python classes and functions:
* `TCLab()` providing access to the Temperature Control Laboratory hardware.
* `TCLabModel()` providing access to a simulation of the Temperature Control Laboratory hardware.
* `clock` for synchronizing with a real time clock.
* `Historian` for data logging.
* `Plotter` for realtime plotting.

Using these Python tools, students can create Jupyter notebooks and Python code covering a wide range of topics in process control.
* **tclab.py:** A Python package providing high-level access to sensors, heaters, and a pseudo-realtime clock. The package includes `TCLab()` providing access to the device, `clock` for synchronizing with a real time clock, `Historian` for data logging, and `Plotter` for realtime plotting.
* **TCLab-sketch.ino:** Firmware for the intrinsically safe operation of the Arduino board and shield. The sketch is available at [https://github.com/jckantor/TCLab-sketch](https://github.com/jckantor/TCLab-sketch).
* **Arduino:** Hardware platform for the Temperature Control Laboratory. TCLab is compatible with the Arduino Uno, Arduino Leonardo, and compatible clones.
## Getting Started
### Installation
Install using
pip install tclab
To upgrade an existing installation, use the command
pip install tclab --upgrade
The development version contains new features, but may be less stable. To install the development version use the command
pip install --upgrade https://github.com/jckantor/TCLab/archive/development.zip
### Hardware Setup
1. Plug a compatible Arduino device (UNO, Leonardo, NHduino) with the
lab attached into your computer via the USB connection. Plug the DC
power adapter into the wall.
2. (optional) Install Arduino Drivers.
*If you are using Windows 10, the Arduino board should connect without additional drivers required.*
Mac OS X users may need to install a serial driver. For Arduino clones using the CH340G, CH34G or CH34X chipset, a suitable driver can be found [here](https://github.com/MPParsley/ch340g-ch34g-ch34x-mac-os-x-driver) or [here](https://github.com/adrianmihalko/ch340g-ch34g-ch34x-mac-os-x-driver).
3. (optional) Install Arduino Firmware.
`TCLab` requires the one-time installation of custom firmware on an Arduino device. If it hasn't been pre-installed, the necessary firmware and instructions are available from the [TCLab-Sketch repository](https://github.com/jckantor/TCLab-sketch).
### Checking that everything works
Execute the following code
import tclab
with tclab.TCLab() as lab:
print(lab.T1)
If everything has worked, you should see the following output message
Connecting to TCLab
TCLab Firmware Version 1.2.1 on NHduino connected to port XXXX
21.54
TCLab disconnected successfully.
The number returned is the temperature of sensor T1 in °C.
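Once the connection test above succeeds, the same pieces can be combined into a short experiment. The following is a minimal sketch (not from the original documentation) that turns heater 1 to 50% and logs and plots both temperatures for 60 seconds, using the `clock`, `Historian`, and `Plotter` utilities described earlier:

    import tclab

    with tclab.TCLab() as lab:
        h = tclab.Historian(lab.sources)   # log times, temperatures, and heater levels
        p = tclab.Plotter(h, 60)           # live plot sized for a 60 second run
        for t in tclab.clock(60):          # pseudo-realtime loop, one step per second
            lab.Q1(50)                     # set heater 1 to 50% power
            p.update(t)                    # record and redraw at time t

If no hardware is attached, replacing `tclab.TCLab()` with `tclab.TCLabModel()` should run the same loop against the simulated device.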
## Next Steps
The notebook directory provides examples on how to use the TCLab module.
The latest documentation is available at
[Read the Docs](http://tclab.readthedocs.io/en/latest/index.html).
### Course Web Sites
More information, instructional videos, and Jupyter notebook
examples are available at the following course websites.
* [Arduino temperature control lab page](http://apmonitor.com/pdc/index.php/Main/ArduinoTemperatureControl) on the BYU Process Dynamics and Control course website.
* [CBE 30338](http://jckantor.github.io/CBE30338/) for the Notre Dame
Chemical Process Control course website.
* [Dynamics and Control](https://github.com/alchemyst/Dynamics-and-Control) for notebooks developed at the University of Pretoria.
```
(defparameter plist (list :lima 7 :yankee "bar"))
(jupyter-widgets:make-interactive-plist
'((:indicator :alfa :type :bool
:label "alfa (bool check)")
(:indicator :bravo :type :bool :style :toggle
:label "bravo (bool toggle)" :description "toggle")
(:indicator :charlie :type :color :default "blue"
:label "charlie (color)")
(:indicator :delta :type :date
:label "delta (date)")
(:indicator :echo :type :file
:label "echo (file)" :accept ".txt")
(:indicator :foxtrot :type :file-multiple
:label "foxtrot (file-multiple)")
(:indicator :golf :type :float
:label "golf (float text)")
(:indicator :hotel :type :float :style :log-slider
:label "hotel (float log-slider)")
(:indicator :india :type :float :style :slider
:label "india (float slider)")
(:indicator :juliett :type :float :style :bounded-text
:label "juliett (float bounded-text)")
(:indicator :kilo :type :float-range
:label "kilo (float-range)")
(:indicator :lima :type :int :default 3
:label "lima (int text)")
(:indicator :mike :type :int :style :slider
:label "mike (int slider)")
(:indicator :november :type :int :style :bounded-text :min -10 :max 10
:label "november (int bounded-text)")
(:indicator :oscar :type :int-range
:label "oscar (int-range)")
(:indicator :papa :type :option
:label "papa (option select)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :quebec :type :option :style :dropdown
:label "quebec (option dropdown)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :romeo :type :option :style :radio
:label "romeo (option radio)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :sierra :type :option :style :slider
:label "sierra (option slider)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :tango :type :option :style :toggle
:label "tango (option toggle)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :uniform :type :option-range
:label "uniform (option-range)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :victor :type :option-multiple
:label "victor (option-multiple)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator :whiskey :type :string
:label "whiskey (string text)")
(:indicator :xray :type :string :style :area
:label "xray (string area)")
(:indicator :yankee :type :string :style :combo
:label "yankee (string combo)" :options ("fu" "bar" "wibble" "gronk" "kilroy")))
plist)
plist
(defparameter alist (list (cons "golf" 3.2d0)))
(jupyter-widgets:make-interactive-alist
'((:indicator "alfa" :type :bool
:label "alfa (bool check)")
(:indicator "bravo" :type :bool :style :toggle
:label "bravo (bool toggle)" :description "toggle")
(:indicator "charlie" :type :color :default "blue"
:label "charlie (color)")
(:indicator "delta" :type :date
:label "delta (date)")
(:indicator "echo" :type :file
:label "echo (file)" :accept ".txt")
(:indicator "foxtrot" :type :file-multiple
:label "foxtrot (file-multiple)")
(:indicator "golf" :type :float
:label "golf (float text)")
(:indicator "hotel" :type :float :style :log-slider
:label "hotel (float log-slider)")
(:indicator "india" :type :float :style :slider
:label "india (float slider)")
(:indicator "juliett" :type :float :style :bounded-text
:label "juliett (float bounded-text)")
(:indicator "kilo" :type :float-range
:label "kilo (float-range)")
(:indicator "lima" :type :int :default 3
:label "lima (int text)")
(:indicator "mike" :type :int :style :slider
:label "mike (int slider)")
(:indicator "november" :type :int :style :bounded-text :min -10 :max 10
:label "november (int bounded-text)")
(:indicator "oscar" :type :int-range
:label "oscar (int-range)")
(:indicator "papa" :type :option
:label "papa (option select)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "quebec" :type :option :style :dropdown
:label "quebec (option dropdown)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "romeo" :type :option :style :radio
:label "romeo (option radio)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "sierra" :type :option :style :slider
:label "sierra (option slider)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "tango" :type :option :style :toggle
:label "tango (option toggle)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "uniform" :type :option-range
:label "uniform (option-range)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "victor" :type :option-multiple
:label "victor (option-multiple)" :options ("fu" "bar" "wibble" "gronk" "kilroy"))
(:indicator "whiskey" :type :string
:label "whiskey (string text)")
(:indicator "xray" :type :string :style :area
:label "xray (string area)")
(:indicator "yankee" :type :string :style :combo
:label "yankee (string combo)" :options ("fu" "bar" "wibble" "gronk" "kilroy")))
alist :test #'string=)
alist
```
```
%matplotlib inline
import gym
import matplotlib
import numpy as np
import sys
from collections import defaultdict
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.blackjack import BlackjackEnv
from lib import plotting
matplotlib.style.use('ggplot')
env = BlackjackEnv()
def make_epsilon_greedy_policy(Q, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
    We pass in a state to this policy to get the action probabilities
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
        epsilon: The probability of selecting a random action. Float between 0 and 1.
nA: Number of actions in the environment.
Returns:
        A function that takes an observation as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(observation):
A = np.ones(nA) * epsilon/nA
best_action = np.argmax(Q[observation])
A[best_action] += 1 - epsilon
return A
return policy_fn
def mc_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1):
"""
Monte Carlo Control using Epsilon-Greedy policies.
Finds an optimal epsilon-greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
discount_factor: Gamma discount factor.
        epsilon: Chance to sample a random action. Float between 0 and 1.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities
"""
# Keeps track of sum and count of returns for each state
# to calculate an average. We could use an array to save all
# returns (like in the book) but that's memory inefficient.
returns_sum = defaultdict(float)
returns_count = defaultdict(float)
# The final action-value function.
# A nested dictionary that maps state -> (action -> action-value).
Q = defaultdict(lambda: np.zeros(env.action_space.n))
policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
for e in range(num_episodes):
# Evaluate the policy for one episode
state = env.reset()
episodes = []
while True:
actions = policy(state)
action_choice = np.random.choice(actions.size,p=actions)
next_state, reward, done, _ = env.step(action_choice)
episodes.append((state,reward,action_choice))
if done:
break
state = next_state
# Calculate G and add it to our estimate of Q for action
states = set([(episode[0],episode[2]) for episode in episodes])
for state in states:
first_index = next(i for i,x in enumerate(episodes) if (x[0],x[2]) == state)
G = sum([(discount_factor ** i) * state[1] for i,state in enumerate(episodes[first_index:])])
returns_count[state] += 1
Q[state[0]][state[1]] = Q[state[0]][state[1]] + (1/returns_count[state]) * (G - Q[state[0]][state[1]])
return Q, policy
Q, policy = mc_control_epsilon_greedy(env, num_episodes=500000, epsilon=0.1)
# For plotting: Create value function from action-value function
# by picking the best action at each state
V = defaultdict(float)
for state, actions in Q.items():
action_value = np.max(actions)
V[state] = action_value
plotting.plot_value_function(V, title="Optimal Value Function")
```
# "AI from scratch"
> "AI algorithms from scratch"
- comments: true
- badges: true
- categories: [ai]
- published: false
```
import scipy as sp
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.graph_objects as go
import sklearn
from sklearn.linear_model import LinearRegression
import pymc3
from sklearn.metrics import r2_score, precision_score, recall_score, confusion_matrix
```
# Hypothesis testing
A particular brand of tires claims that its deluxe tire averages at least 50,000 miles before it needs to be replaced. From past studies of this tire, the standard deviation is known to be 8,000. A survey of owners of that tire design is conducted. From the 28 tires surveyed, the mean lifespan was 46,500 miles with a standard deviation of 9,800 miles. Using 𝛼=0.05 , is the data highly inconsistent with the claim?
```
claim_pop_mean = 50_000
pop_std = 8000
n = 28
sample_mean = 46_500
sample_std = 9800
alpha = 0.05
# 1. How far is the sample_mean from the pop_mean ?
# H0 => pop_mean = 50_000
# H1 => pop_mean > 50_000 or pop_mean < 50_000
print("If we know the pop std")
how_far_on_the_unit_normal_of_sample_means = (46_500 - 50_000) / (8000/np.sqrt(28) )
print(how_far_on_the_unit_normal_of_sample_means, " how_far_on_the_unit_normal_of_sample_means")
how_far_we_allow = - sp.stats.norm.ppf(0.95)
print(how_far_we_allow, " how_far_we_allow")
how_far_we_are_percent = sp.stats.norm.cdf(how_far_on_the_unit_normal_of_sample_means)
print(how_far_we_are_percent, " how_far_we_are_percent")
print("\n")
print("If we don't know the pop std")
how_far_on_the_unit_normal_of_sample_means = (46_500 - 50_000) / (9800/np.sqrt(28) )
print(how_far_on_the_unit_normal_of_sample_means, " how_far_on_the_unit_normal_of_sample_means")
how_far_we_allow = - sp.stats.t.ppf(0.95, df=28-1)
print(how_far_we_allow, " how_far_we_allow")
how_far_we_are_percent = sp.stats.t.cdf(how_far_on_the_unit_normal_of_sample_means, df=28-1)
print(how_far_we_are_percent, " how_far_we_are_percent")
```
# Confidence interval
```
how_much_we_allow_on_unit_normal_distrib = sp.stats.norm.ppf(0.95)
how_much_we_allow_in_problem_domain = how_much_we_allow_on_unit_normal_distrib * (9800 / np.sqrt(n))
how_much_we_allow_in_problem_domain
[46_500 - how_much_we_allow_in_problem_domain, 46_500 + how_much_we_allow_in_problem_domain]
```
# Bayesian inference
```
fake_observed = sp.stats.norm(46, 9).rvs(size=28)
fake_observed.mean(), fake_observed.std()
# Doing it by hand with grid search approximation
from scipy.stats import norm, binom
import matplotlib.pyplot as plt
possible_probabilities_mean = np.linspace(0,100,100)
prior_mean = norm.pdf(possible_probabilities_mean, loc=60, scale=20)
prior_std = 9
plt.plot(prior_mean, label="prior")
likelihood_mean = norm.pdf(fake_observed.mean(),
loc=possible_probabilities_mean,
scale=9)
plt.plot(likelihood_mean, label="likelihood")
posterior_unnormalized = prior_mean * likelihood_mean
posterior = posterior_unnormalized / posterior_unnormalized.sum()
plt.plot(posterior, label="posterior")
plt.legend()
# Doing it with pymc3
with pymc3.Model() as model:
u_prior = pymc3.distributions.Uniform("u_prior", 0, 100)
sigma_prior = pymc3.distributions.Uniform("sigma_prior", 0, 20)
likelihood = pymc3.distributions.Normal("likelihood", mu=u_prior, sigma=sigma_prior, observed=[fake_observed])
trace = pymc3.sample()
pymc3.traceplot(trace)
```
# Linear Regression
```
df = pd.read_csv("sample_data/california_housing_train.csv")
df = df[['housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income', 'median_house_value']]
scaled_df = (df - df.min()) / (df.max() - df.min())
X = scaled_df[['housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income']].values
y = scaled_df['median_house_value'].values
X_with_intercept = np.hstack((np.ones((len(X), 1)),X))
B = np.linalg.inv(X_with_intercept.T @ X_with_intercept) @ (X_with_intercept.T @ y.reshape(-1, 1))
lr = LinearRegression().fit(X, y)
print("Manual: ", B)
print("Manual score: ", r2_score(y, (X_with_intercept @ B).reshape(-1)))
print("")
print("Sklearn: ", lr.coef_, lr.intercept_)
print("Sklearn score: ", r2_score(y, lr.predict(X)))
```
# Logistic Regression
```
df['median_house_value'].hist()
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def log_likelihood(y_hat, y_true):
return np.sum( y_true * np.log(y_hat) + (1-y_true) * np.log(1-y_hat) )
def gradients(X, y, y_hat):
# X --> Input.
# y --> true/target value.
# y_hat --> hypothesis/predictions.
# w --> weights (parameter).
# b --> bias (parameter).
# m-> number of training examples.
m = X.shape[0]
# Gradient of loss w.r.t weights.
dw = (1/m)*np.dot(X.T, (y_hat - y))
return dw
df = pd.read_csv("sample_data/california_housing_train.csv")
df = df[['housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income', 'median_house_value']]
df['median_house_value_cat'] = (df['median_house_value'] > 150_000).astype(int)
scaled_df = (df - df.min()) / (df.max() - df.min())
X = scaled_df[['housing_median_age', 'total_rooms', 'total_bedrooms', 'population', 'households', 'median_income']].values
y = df['median_house_value_cat'].values
X_with_intercept = np.hstack((np.ones((len(X), 1)),X))
B = np.random.normal(0, 0.1 ,(7, 1))
for i in range(20_000):
y_hat = sigmoid(X_with_intercept @ B).reshape(-1)
if i % 1000 == 0 or i ==0:
print("loss: ", log_likelihood(y_hat, y))
deltas = gradients(X_with_intercept, y, y_hat)
B -= 0.3 * deltas.reshape(-1, 1)
lr = sklearn.linear_model.LogisticRegression().fit(X, y)
print("Manual: ", B)
print("Manual score: ",
precision_score(y, (sigmoid(X_with_intercept @ B).reshape(-1) > 0.5).astype(int) ),
recall_score(y, (sigmoid(X_with_intercept @ B).reshape(-1) > 0.5).astype(int) ),
)
print()
print("Sklearn: ", lr.coef_, lr.intercept_)
print("Sklearn score",
precision_score(y, lr.predict(X)),
recall_score(y, lr.predict(X))
)
```
# Confusion Matrix
```
sns.heatmap(confusion_matrix(y, (sigmoid(X_with_intercept @ B).reshape(-1) > 0.5).astype(int)), annot=True)
```
# Decision tree
```
from sklearn.datasets import load_wine, load_breast_cancer
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
load_breast_cancer()['feature_names'][22]
X, y = load_breast_cancer(return_X_y=True)
f1_score(y, np.round(y.mean().repeat(len(y))))
X = pd.DataFrame(X)
smallest_gini_score = float("inf")
threshold_saved = None
# Pick a row and column to use as threshold
for col in X.columns:
for row in X.index:
threshold = X.iloc[row, col]
left_leaf_idx = X.loc[:, col] >= threshold
right_leaf_idx = X.loc[:, col] < threshold
if sum(left_leaf_idx) > 0 and sum(right_leaf_idx) > 0:
# Compute the gini score with that threshold, and save it if it's the smallest so far
gini_score_left = (1 - y[left_leaf_idx].mean()**2 - (1-y[left_leaf_idx].mean())**2)
gini_score_right = (1 - y[right_leaf_idx].mean()**2 - (1-y[right_leaf_idx].mean())**2)
gini_score = (sum(left_leaf_idx) * gini_score_left + sum(right_leaf_idx) * gini_score_right) / len(X)
if gini_score < smallest_gini_score:
smallest_gini_score = gini_score
threshold_saved = (col, row, threshold)
# We now use that threshold to split
print(threshold_saved)
# Now predict using this split
left_leaf_idx = X.loc[:, 20] >= 16.82
right_leaf_idx = X.loc[:, 20] < 16.82
y_predict = pd.Series(y.copy())
y_predict[left_leaf_idx] = y[left_leaf_idx].mean()
y_predict[right_leaf_idx] = y[right_leaf_idx].mean()
print("Leaf sizes: ", len(left_leaf_idx), len(right_leaf_idx))
print("Leaf values: ", y[left_leaf_idx].mean(), y[right_leaf_idx].mean())
print("F1 score: ", f1_score(y, np.round(y_predict)))
# Compare with sklearn
from sklearn import tree
t = DecisionTreeClassifier(max_depth=1, criterion='gini')
t.fit(X, y)
print(tree.export_text(t))
```
# Random forest TODO
```
from sklearn.datasets import load_wine, load_breast_cancer
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
X, y = load_breast_cancer(return_X_y=True)
for i in range(30):
    # 1. Use bootstrap sampling (bagging) to sample the rows
# 2. Pick a random subset of the features
tree = DecisionTreeRegressor()
tree.fit(X, y)
```
# Boosting trees
```
from sklearn.datasets import load_wine, load_breast_cancer
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import f1_score
raw = load_breast_cancer(return_X_y=True)
X = pd.DataFrame(raw[0])
y = pd.DataFrame(raw[1])
initial_prediction_proba = y.mean()
initial_prediction_classes = round(initial_prediction_proba)
initial_prediction_logodds = np.log(initial_prediction_proba / (1-initial_prediction_proba))
proba_residuals = (y - initial_prediction_proba).values.reshape(-1)
print("Score with mean: ", f1_score(y, [1]*len(y)))
trees = []
for i in range(50):
tree = DecisionTreeRegressor(max_depth=1)
tree.fit(X, proba_residuals)
trees.append(tree)
proba_residuals = proba_residuals - tree.predict(X).reshape(-1)
predictions = np.array(y.mean())
for tree in trees:
predictions = tree.predict(X).reshape(-1) + predictions
print("Final score: ", f1_score(y, predictions.round()))
```
# Gradient boosting tree
# Distributions TODO
## Normal distribution
## Poisson distribution
```
from scipy.stats import poisson
```
# PCA
The principal components are the eigenvectors of the covariance matrix of our data, and the corresponding eigenvalues tell us how much variance each component explains.
This is because we are looking for the directions in which the data is stretched, and how much stretching happens in each of them, with respect to the variance of our data.
```
from sklearn.datasets import load_digits
import seaborn as sns
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
digits = pd.DataFrame(load_digits()['data'])
classes = load_digits(return_X_y=True)[1]
low_dim_digits = PCA(n_components=2).fit_transform(digits)
sns.scatterplot(x=low_dim_digits[:,0], y=low_dim_digits[:,1], hue=classes)
digits_normed = digits - digits.mean()
# compute the covariance matrix
cov_matrix = digits_normed.T @ digits_normed / len(digits_normed) # digits_normed.cov()
eigen_values, eigen_vectors = np.linalg.eig(cov_matrix)
eigen_values, eigen_vectors
# Sort eigen values end eigen vectors
sorted_index = np.argsort(eigen_values)[::-1]
sorted_eigenvalue = eigen_values[sorted_index]
sorted_eigenvectors = eigen_vectors[:,sorted_index]
# Select the 2 best
eigenvector_subset = sorted_eigenvectors[:, 0:2]
X_reduced = np.dot(eigenvector_subset.transpose(), digits_normed.transpose()).transpose()
sns.scatterplot(x=X_reduced[:,0], y=X_reduced[:,1], hue=classes)
```
```
# This text is in code format
```
# Matrix factorization
## SVD
```
import pandas as pd
import numpy as np
raw = pd.read_csv("https://raw.githubusercontent.com/smanihwr/ml-latest-small/master/ratings.csv")
user_item_interactions = raw.pivot(values="rating", columns="movieId", index="userId")
user_item_interactions = user_item_interactions.fillna(0)
from scipy.linalg import eig
import numpy as np
A = np.array([
[5,5,0,1],
[5,5,0,0],
[0,1,5,5],
[0,0,5,5],
[0,0,3,5]
])
# A = np.array([
# [3, 2, 2],
# [2, 3, -2],
# ])
U_eigen_values, U_unordered = np.linalg.eig(A @ A.T)
V_eigen_values, V_unordered = np.linalg.eig(A.T @ A)
idx_U = np.argsort(U_eigen_values)[::-1]
idx_V = np.argsort(V_eigen_values)[::-1]
D = np.sqrt(np.around(V_eigen_values[idx_V], decimals=10))
U = U_unordered[:,idx_U] # Using the order of U_eigen_values to reorder U
U = U * [[1,1,-1,-1,-1]] # Each eigenvector can be in 2 directions. Pick the correct one. Very manual. I actually based it on the result of np.linalg.svd. Not sure how you should actually be doing this.
V = (V_unordered[:,idx_V] * [[-1, -1, 1, -1]]) # Using the order of V_eigen_values to reorder V. # Each eigenvector can be in 2 directions. Pick the correct one. Very manual.
np.around(np.matrix(U) @ np.vstack((np.diag(D), np.zeros((len(V))))) @ np.matrix(V.T), decimals=1)
U_, D_, Vt_ = np.linalg.svd(A)
np.around(np.matrix(U_) @ np.vstack((np.diag(D_), np.zeros((len(V_))))) @ np.matrix(Vt_), decimals=1)
```
## Truncated SVD
Truncate the SVD to 2 components by keeping only the two largest singular values.
```
np.matrix(U[:, :2])
np.around(np.matrix(U[:, :2]) @ np.diag(D[:2]) @ np.matrix(V[:,:2].T), decimals=1)
```
## Using gradient descent TODO
```
import numpy as np
A = np.array([
[5,5,0,1],
[5,5,0,0],
[0,1,5,5],
[0,0,5,5],
[0,0,3,5]
])
U = np.random.rand(5, 3) * 0.01
D = np.random.rand(4, 3) * 0.01
error = ((A - U @ D.T)**2).mean()
deltas = A - U @ D.T
# Reference: https://medium.com/analytics-vidhya/matrix-factorization-made-easy-recommender-systems-7e4f50504477
```
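The cell above only sets up the pieces. Below is a minimal sketch (not part of the original post) of how the factorization could be finished with plain gradient descent on the toy matrix `A`; the learning rate and iteration count are rough, untuned guesses:

```
import numpy as np

A = np.array([
    [5,5,0,1],
    [5,5,0,0],
    [0,1,5,5],
    [0,0,5,5],
    [0,0,3,5]
], dtype=float)

rank = 3
U = np.random.rand(5, rank) * 0.01
D = np.random.rand(4, rank) * 0.01
lr = 0.01

for step in range(10_000):
    residual = A - U @ D.T        # reconstruction error on every entry
    U_grad = -2 * residual @ D    # gradient of the squared error w.r.t. U
    D_grad = -2 * residual.T @ U  # gradient of the squared error w.r.t. D
    U -= lr * U_grad
    D -= lr * D_grad

print("MSE:", ((A - U @ D.T) ** 2).mean())
print(np.round(U @ D.T, 1))
```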
# Neural network TODO
```
import numpy as np
X = np.array([[1,0] , [0,0], [0,1], [1,1]])
y = np.array([[0,1] , [1,0], [0,1], [1,0]])
weights_input_hidden = np.random.rand(2, 4) * 0.001
weights_hidden_output = np.random.rand(4, 2) * 0.001
def relu(data):
return np.maximum(data, 0)
def relu_grad(data):
return (data > 0 )* 1
def softmax(x):
    e_x = np.exp(x - x.max(axis=1).reshape(-1, 1))  # subtract the row max for numerical stability
return e_x / e_x.sum(axis=1).reshape(-1, 1)
def softmax_grad(softmax):
s = softmax.reshape(-1,1)
return np.diagflat(s) - np.dot(s, s.T)
predictions = softmax(relu(X @ weights_input_hidden) @ weights_hidden_output)
errors = softmax_grad(predictions)  # TODO: combine with the loss gradient and backpropagate through the weights
```
# Loss functions TODO
# Using Interact
The `interact` function (`ipywidgets.interact`) automatically creates user interface (UI) controls for exploring code and data interactively. It is the easiest way to get started using IPython's widgets.
```
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
```
## Basic `interact`
At the most basic level, `interact` autogenerates UI controls for function arguments, and then calls the function with those arguments when you manipulate the controls interactively. To use `interact`, you need to define a function that you want to explore. Here is a function that prints its only argument `x`.
```
def f(x):
return x
```
When you pass this function as the first argument to `interact` along with an integer keyword argument (`x=10`), a slider is generated and bound to the function parameter.
```
interact(f, x=10);
```
When you move the slider, the function is called, which prints the current value of `x`.
If you pass `True` or `False`, `interact` will generate a checkbox:
```
interact(f, x=True);
```
If you pass a string, `interact` will generate a text area.
```
interact(f, x='Hi there!');
```
`interact` can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, `interact` also works with functions that have multiple arguments.
```
@interact(x=True, y=1.0)
def g(x, y):
return (x, y)
```
## Fixing arguments using `fixed`
There are times when you may want to explore a function using `interact`, but fix one or more of its arguments to specific values. This can be accomplished by wrapping values with the `fixed` function.
```
def h(p, q):
return (p, q)
```
When we call `interact`, we pass `fixed(20)` for q to hold it fixed at a value of `20`.
```
interact(h, p=5, q=fixed(20));
```
Notice that a slider is only produced for `p` as the value of `q` is fixed.
## Widget abbreviations
When you pass an integer-valued keyword argument of `10` (`x=10`) to `interact`, it generates an integer-valued slider control with a range of $[-10,+3\times10]$. In this case, `10` is an *abbreviation* for an actual slider widget:
```python
IntSlider(min=-10,max=30,step=1,value=10)
```
In fact, we can get the same result if we pass this `IntSlider` as the keyword argument for `x`:
```
interact(f, x=widgets.IntSlider(min=-10,max=30,step=1,value=10));
```
This example clarifies how `interact` processes its keyword arguments:
1. If the keyword argument is a `Widget` instance with a `value` attribute, that widget is used. Any widget with a `value` attribute can be used, even custom ones (see the example below).
2. Otherwise, the value is treated as a *widget abbreviation* that is converted to a widget before it is used.
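As a small illustration of the first rule, here is a hedged example (not from the original notebook) that passes a widget which is not a slider at all; it assumes the `f` defined earlier, which simply returns its argument, and that `widgets.ColorPicker` is available in your ipywidgets version:

```
interact(f, x=widgets.ColorPicker(value='red'));
```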
The following table gives an overview of different widget abbreviations:
<table class="table table-condensed table-bordered">
<tr><td><strong>Keyword argument</strong></td><td><strong>Widget</strong></td></tr>
<tr><td>`True` or `False`</td><td>Checkbox</td></tr>
<tr><td>`'Hi there'`</td><td>Text</td></tr>
<tr><td>`value` or `(min,max)` or `(min,max,step)` if integers are passed</td><td>IntSlider</td></tr>
<tr><td>`value` or `(min,max)` or `(min,max,step)` if floats are passed</td><td>FloatSlider</td></tr>
<tr><td>`['orange','apple']` or `{'one':1,'two':2}`</td><td>Dropdown</td></tr>
</table>
Note that a dropdown is used if a list or a dict is given (signifying discrete choices), and a slider is used if a tuple is given (signifying a range).
You have seen how the checkbox and textarea widgets work above. Here, more details about the different abbreviations for sliders and dropdowns are given.
If a 2-tuple of integers is passed `(min,max)`, an integer-valued slider is produced with those minimum and maximum values (inclusively). In this case, the default step size of `1` is used.
```
interact(f, x=(0,4));
```
If a 3-tuple of integers is passed `(min,max,step)`, the step size can also be set.
```
interact(f, x=(0,8,2));
```
A float-valued slider is produced if the elements of the tuples are floats. Here the minimum is `0.0`, the maximum is `10.0` and step size is `0.1` (the default).
```
interact(f, x=(0.0,10.0));
```
The step size can be changed by passing a third element in the tuple.
```
interact(f, x=(0.0,10.0,0.01));
```
For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to `5.5`.
```
@interact(x=(0.0,20.0,0.5))
def h(x=5.5):
return x
```
Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function.
```
interact(f, x=['apples','oranges']);
```
If you want a dropdown menu that passes non-string values to the Python function, you can pass a dictionary. The keys in the dictionary are used for the names in the dropdown menu UI and the values are the arguments that are passed to the underlying Python function.
```
interact(f, x={'one': 10, 'two': 20});
```
## Using function annotations with `interact`
If you are using Python 3, you can also specify widget abbreviations using [function annotations](https://docs.python.org/3/tutorial/controlflow.html#function-annotations).
Define a function with a checkbox widget abbreviation for the argument `x`.
```
def f(x:True): # python 3 only
return x
```
Then, because the widget abbreviation has already been defined, you can call `interact` with a single argument.
```
interact(f);
```
If you are running Python 2, function annotations can be defined using the `@annotate` function.
```
from IPython.utils.py3compat import annotate
@annotate(x=True)
def f(x):
return x
interact(f);
```
## `interactive`
In addition to `interact`, IPython provides another function, `interactive`, that is useful when you want to reuse the widgets that are produced or access the data that is bound to the UI controls.
Here is a function that returns the sum of its two arguments.
```
def f(a, b):
return a+b
```
Unlike `interact`, `interactive` returns a `Widget` instance rather than immediately displaying the widget.
```
w = interactive(f, a=10, b=20)
```
The widget is a `Box`, which is a container for other widgets.
```
type(w)
```
The children of the `Box` are two integer-valued sliders produced by the widget abbreviations above.
```
w.children
```
To actually display the widgets, you can use IPython's `display` function.
```
from IPython.display import display
display(w)
```
At this point, the UI controls work just like they would if `interact` had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by `interactive` also gives you access to the current keyword arguments and return value of the underlying Python function. Note that unlike `interact`, the return value of the function will not be displayed automatically, but you can display a value inside the function with `IPython.display.display`.
Here are the current keyword arguments. If you rerun this cell after manipulating the sliders, the values will have changed.
```
w.kwargs
```
Here is the current return value of the function.
```
w.result
```
## Disabling continuous updates
When interacting with long running functions, realtime feedback is a burden instead of being helpful. See the following example:
```
def slow_function(i):
print(int(i),list(x for x in range(int(i)) if
str(x)==str(x)[::-1] and
str(x**2)==str(x**2)[::-1]))
return
%%time
slow_function(1e6)
```
Notice that the output is updated even while dragging the mouse on the slider. This is not useful for long running functions due to lagging:
```
from ipywidgets import FloatSlider
interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));
```
There are two ways to mitigate this. You can either only execute on demand, or restrict execution to mouse release events.
### `interact_manual`
The `interact_manual` function provides a variant of interaction that allows you to restrict execution so it is only done on demand. A button is added to the interact controls that allows you to trigger an execute event.
```
interact_manual(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));
```
### `continuous_update`
If you are using slider widgets, you can set the `continuous_update` kwarg to `False`. `continuous_update` is a kwarg of slider widgets that restricts executions to mouse release events.
```
interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5, continuous_update=False));
```
## Arguments that are dependent on each other
Arguments that are dependent on each other can be expressed manually using `observe`. See the following example, where one variable is used to describe the bounds of another. For more information, please see the [widget events example notebook](./Widget Events.ipynb).
```
x_widget = FloatSlider(min=0.0, max=10.0, step=0.05)
y_widget = FloatSlider(min=0.5, max=10.0, step=0.05, value=5.0)
def update_x_range(*args):
x_widget.max = 2.0 * y_widget.value
y_widget.observe(update_x_range, 'value')
def printer(x, y):
print(x, y)
interact(printer,x=x_widget, y=y_widget);
```
## Numpy Arrays and Vectorization
Matrices and vectors are frequently needed for computation and are a convenient way to store and access data (a vector is most commonly represented as many rows with a single column). A significant amount of work has been done to make computers very fast at matrix math, and while the tradeoff is commonly framed as 'more memory for faster calculation', contemporary machines typically have enough memory to process matrices in large chunks.
In Python's NumPy, vectors and matrices are referred to as arrays: a fixed-size collection of elements of the same type (integer, floating point number, string of characters, etc.).
Underneath, NumPy arrays are implemented in C for greater efficiency.
**Note that this is different from the python list - lists are a python datatype, whereas arrays are objects that are made available via the python package numpy**.
Array restrictions:
- You can't append things to an array (i.e. you can't make it bigger without creating an entirely new array)
- You can only put things of the same type into an array
The array is the basis of all (fast) scientific computing in Python.
We need to have a solid foundation of what an array is, how to use it, and what it can do.
By the end of this file you should have seen simple examples of:
1. Arrays are faster than lists!
2. Create an array
3. Different types of arrays
4. Creating and accessing (indexing) arrays
5. Building arrays from other arrays (appending)
6. Operations on arrays of different sizes (broadcasting)
7. Arrays as Python objects
Further reading:
https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html
```
# Python imports
import numpy as np
```
## Arrays versus lists
While both data types hold a sequence of discrete values, arrays are stored more efficiently in memory and have significantly higher performance than Python lists. They also bring with them a host of methods and syntax that make them more convenient, especially for numeric operations.
```
l = 20000
test_list = list(range(l))
test_array = np.arange(l)
print(type(test_list))
print(type(test_array))
print(test_list[:300]) # Print the first 300 elements
# (more on indexing in a bit):
print(test_array)
%timeit [np.sqrt(i) for i in test_list]
%timeit [np.sqrt(test_array)]
```
If the output says "10 loops, best of 3: [time]", it means each timing run executed the statement 10 times, the timing was repeated 3 times, and the fastest of the three runs is reported (as the time per loop).
## Creating and accessing (indexing) arrays
We can create arrays from scratch:
```
test_array = np.array([[1,2,3,4], [6,7,8,9]])
print(test_array)
```
Index arrays using square brackets, starting from zero and specifying row, column:
```
test_array[0,3]
```
Array element types are inferred, much like Python variables: NumPy determines what kind of elements an array should hold based on the values used to create it.
All elements of a NumPy array share the same type. To check the data type (dtype), enter:
```
test_array.dtype
```
Different variable types use different amounts of memory and can have an effect on performance for very large arrays.
Changing the type of array is possible via:
```
test_array = test_array.astype('float64')
print(test_array)
# We can create arrays of boolean values too:
bool_array = np.array([[True, True, False,True],[False,False,True,False]])
print(bool_array)
```
We can replace values in an array:
```
test_array[0,3]=99 # Assign value directly
print(test_array)
```
Deleting values from an array is possible, but because of the way arrays are stored in memory, it usually makes sense to keep the array structure and mark the entry instead. Often a 'nan' (not a number) or some sentinel value such as `0` or `-1` is used.
Keep in mind that 'nan' only works for floating-point arrays:
```
test_array[0,3] = 'nan'
print(test_array)
```
### Fancy ways of indexing
Slicing Arrays:
Slicing arrays refers to indexing >1 elements in a previous array. Slicing is often used when parallelizing computations using arrays. Indexing is array[row, column].
```
test_array[:,1] # Use the ':' to index along one dimension fully
test_array[1,1:] # Adding a colon indexes the rest of the values
# (includes the numbered index)
test_array[1,1:-1] # We can index relative to the first and last elements
test_array[1,::2] # We can specify the indexing order
test_array[1,1::-1] # We can get pretty fancy about it
# Index second row, second from first to second from
# last in reverse order.
```
Logical Indexing
We can specify only the elements we want by using an array of True/False values:
```
test_array[bool_array] # Use our bool_array from earlier
```
Using the `isnan` function in numpy:
```
nans = np.isnan(test_array)
print(nans)
test_array[nans] = 4
print(test_array)
```
## Building arrays from other arrays (appending)
We can build arrays from other arrays by stacking them horizontally or vertically:
```
test_array_Vstacked = np.vstack((test_array, [1,2,3,4]))
print(test_array_Vstacked)
test_array_Hstacked = np.hstack((test_array, test_array))
print(test_array_Hstacked)
```
We can bring these dimensions back down to one via `flatten`:
```
test_array_Hstacked.flatten()
```
***Caution***: appending to numpy arrays frequently is memory intensive. Every time this happens, an entirely new chunk of memory is allocated and the old array is copied to the new location.
It's faster to 'preallocate' an array of the final size and simply populate it as the computation progresses.
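Here is a minimal sketch of the preallocation pattern (the array name and the loop body are just illustrative):
```
n = 10000
results = np.empty(n)        # allocate once; contents start uninitialized
for i in range(n):
    results[i] = i ** 0.5    # fill in place as the computation progresses
```
`np.zeros(n)` or `np.full(n, np.nan)` are common alternatives when you want a recognizable placeholder value.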
## Operations on arrays of different sizes (broadcasting)
NumPy automatically handles arithmetic operations between arrays of different dimensions. In other words, when arrays have different (but compatible) shapes, the smaller one is 'broadcast' across the larger one.
```
test_array
print("The broadcasted array is: ", test_array[0,:])
test_array[0,:] * test_array
```
However, if the dimensions don't match, it won't work:
```
print("The broadcasted array is: ", test_array[:,0])
#test_array[:,0] * test_array # Uncomment the line to see that the
# dimensions don't match
# Make use of the matrix transpose (also can use array.T)
np.transpose( test_array[:,0]*np.transpose(test_array) )
```
## Arrays as Python objects
Python can be used as an object-oriented language, and numpy arrays have *lots* of methods and properties. Many operations are available both as `numpy.<function>(<array>)` and as `<array>.<function>()`.
For example, the transpose above:
```
print("The original array is: ", test_array)
print("The transposed array is: ", np.transpose(test_array) )
# Alternatively, using test_array as an object:
print("The transposed array is: ", test_array.transpose() )
```
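A few more examples of the equivalent function and method forms (purely illustrative):
```
print(np.sum(test_array), test_array.sum())      # sum of all elements
print(np.mean(test_array), test_array.mean())    # mean of all elements
print(np.reshape(test_array, (4, 2)).shape, test_array.reshape(4, 2).shape)
```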
One of the most frequently used properties of an array is its dimensions (shape):
```
print("The original array dimensions are: ", test_array.shape)
print("The array transpose dimensions are: ", test_array.transpose().shape)
```
## Sorting:
The `sort` method works in place, so **once it is called on an array, the original array itself is sorted (and the method returns `None`)**:
```
test_array2 = np.array([1,5,4,0,1])
print("The original array is: ", test_array2)
test_array3 = test_array2.sort() # Run the sort - note that sort() returns None, so test_array3 is None
print("The reassigned array should be sorted: ", test_array3)
print("test_array2 after sort: ", test_array2)
```
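If you want a sorted copy while leaving the original untouched, `np.sort` returns a new array instead of sorting in place (a small sketch; `test_array4` is just an illustrative name):
```
test_array4 = np.array([1, 5, 4, 0, 1])
sorted_copy = np.sort(test_array4)    # returns a sorted copy
print("Original: ", test_array4)      # unchanged
print("Sorted copy: ", sorted_copy)
```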
<!--NOTEBOOK_HEADER-->
*This notebook contains course material from [CBE40455](https://jckantor.github.io/CBE40455) by
Jeffrey Kantor (jeff at nd.edu); the content is available [on Github](https://github.com/jckantor/CBE40455.git).
The text is released under the [CC-BY-NC-ND-4.0 license](https://creativecommons.org/licenses/by-nc-nd/4.0/legalcode),
and code is released under the [MIT license](https://opensource.org/licenses/MIT).*
<!--NAVIGATION-->
< [Measuring Return](http://nbviewer.jupyter.org/github/jckantor/CBE40455/blob/master/notebooks/07.01-Measuring-Return.ipynb) | [Contents](toc.ipynb) | [Binomial Model for Pricing Options](http://nbviewer.jupyter.org/github/jckantor/CBE40455/blob/master/notebooks/07.03-Binomial-Model-for-Pricing-Options.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/CBE40455/blob/master/notebooks/07.02-Geometric-Brownian-Motion.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/CBE40455/master/notebooks/07.02-Geometric-Brownian-Motion.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# Geometric Brownian Motion
This notebook presents methods for modeling a financial time series as geometric Brownian motion. The basic outline is to:
1. Capture a data series.
2. Compute returns (we'll do both linear and log returns).
3. Test statistical properties. We need the returns to be independent and identically distributed (iid).
4. Fit distribution of returns to a normal distribution.
5. Perform simulations.
## Historical perspectives
The name [Brownian motion](http://physics.ucsc.edu/~drip/5D/brown/brown.pdf) (or Brownian movement) is a tribute to Sir Robert Brown, the Scottish botanist who, in 1827, reported the random motion of pollen grains on the surface of water when viewed under a microscope.
The explanation of that behavior waited for the genius of Albert Einstein. In the *[Annus mirabilis](https://en.wikipedia.org/wiki/Annus_mirabilis)* of 1905, while employed as a patent clerk and living in a [modest apartment in Bern](https://en.wikipedia.org/wiki/Annus_Mirabilis_papers#/media/File:Albert_einstein_house_bern.JPG), Einstein published papers describing Special Relativity, laid the foundation for quantum theory with a paper on the photoelectric effect, and demonstrated the existence of atoms and molecules with a paper on [Brownian Motion](https://www.zbp.univie.ac.at/dokumente/einstein2.pdf).
Remarkably, five years earlier [Louis Bachelier](https://en.wikipedia.org/wiki/Louis_Bachelier) published his Master's thesis on the "Theory of Speculation". While this study was limited to the dynamics of prices on the Paris Bourse, and therefore didn't have the profound implications for physics of Einstein's forthcoming work, Bachelier should nevertheless be credited with introducing random motion to describe price dynamics. Unfortunately, this work lay in relative obscurity for decades.
Other figures in this intellectual history include the Japanese mathematician [Kiyosi Ito](https://en.wikipedia.org/wiki/Kiyosi_It%C3%B4), whose work in the difficult circumstances of the Second World War [laid a foundation for stochastic calculus](http://www4.math.sci.osaka-u.ac.jp/shijodanwakai/pdf/1077.pdf). Later, the [eccentric](https://www.theatlantic.com/technology/archive/2014/06/norbert-wiener-the-eccentric-genius-whose-time-may-have-finally-come-again/372607/) [Norbert Wiener](https://en.wikipedia.org/wiki/Norbert_Wiener) established a theory for random motion -- the [Wiener process](https://en.wikipedia.org/wiki/Wiener_process) -- now widely used in engineering and finance.
The colorful history of individual genius and iconoclastic research doesn't end there, but it is enough to provide some background for the terminology introduced below.
## Python Imports and Utility Functions
The [`pandas-datareader`](https://pandas-datareader.readthedocs.io/en/latest/#) package provides a utility for accessing on-line data sources. Since the interfaces to those data sources are constantly changing, the next cell updates any current installation of the data reader to the latest available version.
```
%%capture
#!pip install pandas_datareader --upgrade
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import datetime
import pandas as pd
import pandas_datareader as pdr
# python library for accessing internet resources
import requests
def lookup_yahoo(symbol):
"""Return a list of all matches for a symbol on Yahoo Finance."""
url = f"http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={symbol}®ion=1&lang=en"
return requests.get(url).json()["ResultSet"]["Result"]
def get_symbol(symbol):
"""Return exact match for a symbol."""
result = [r for r in lookup_yahoo(symbol) if symbol == r['symbol']]
return result[0] if len(result) > 0 else None
```
## Statistical Properties of Returns
```
symbol = 'AAPL'
# get symbol data
symbol_data = get_symbol(symbol)
print(symbol_data)
assert symbol_data, f"Symbol {symbol} wasn't found."
# end date is today
end = datetime.datetime(2018, 8, 30).date()
start = end-datetime.timedelta(3*365)
# get stock price data
S = pdr.data.DataReader(symbol, "yahoo", start, end)['Adj Close']
rlin = (S - S.shift(1))/S.shift(1)
rlog = np.log(S/S.shift(1))
# clean up data
rlin = rlin.dropna()
rlog = rlog.dropna()
# plot data
plt.figure(figsize=(10,6))
plt.subplot(3,1,1)
title = f"{symbol_data['name']} ({symbol_data['exchDisp']} {symbol_data['typeDisp']} {symbol_data['symbol']})"
S.plot(title=title)
plt.ylabel('Adjusted Close')
plt.grid()
plt.subplot(3,1,2)
rlin.plot()
plt.title('Linear Returns (daily)')
plt.grid()
plt.tight_layout()
plt.subplot(3,1,3)
rlog.plot()
plt.title('Log Returns (daily)')
plt.grid()
plt.tight_layout()
```
### Distribution of Returns
A basic assumption in developing stochastic price models is that the residuals are independent and identically distributed (i.i.d.) random variates. Here we show the results of several common checks that would screen out non-i.i.d. random variates.
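As a complement to the histograms below, one could also run quick numerical checks. Here is a small sketch (not part of the original notebook) using `scipy.stats.jarque_bera` for normality and the lag-1 autocorrelation as a rough independence check, assuming `rlog` has been computed as above:
```
from scipy.stats import jarque_bera

# Normality check on log returns
jb_stat, jb_pvalue = jarque_bera(rlog)
print(f'Jarque-Bera statistic: {jb_stat:.2f}, p-value: {jb_pvalue:.4f}')

# Simple serial-correlation check: lag-1 autocorrelation of log returns
lag1_corr = np.corrcoef(rlog[:-1], rlog[1:])[0, 1]
print(f'Lag-1 autocorrelation: {lag1_corr:.4f}')
```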
```
bins = np.linspace(-0.12,0.10,50)
plt.figure(figsize=(10,5))
plt.subplot(2,2,1)
rlog.hist(bins=bins, density=True, color='b', alpha=0.5)
plt.xlim(bins.min(),bins.max())
plt.title('Log Returns')
plt.subplot(2,2,3)
rlog.hist(bins=bins, density=True, cumulative=True, color='b',alpha=0.5)
plt.xlim(bins.min(),bins.max())
plt.subplot(2,2,2)
rlin.hist(bins=bins, density=True, color='y', alpha=0.5)
plt.xlim(bins.min(),bins.max())
plt.title('Linear Returns')
plt.subplot(2,2,4)
rlin.hist(bins=bins, density=True, cumulative=True, color='y',alpha=0.5)
plt.xlim(bins.min(),bins.max())
plt.tight_layout()
```
### Distribution of First Half versus Second Half of the Data Set
```
from scipy.stats import norm
k = int(len(rlog)/2)
r = np.linspace(rlog.min(),rlog.max())
plt.figure()
param = norm.fit(rlog[:k])
rlog[:k].hist(bins=r, density=True, alpha=0.35, color='r')
plt.plot(r,norm.pdf(r,loc=param[0],scale=param[1]),'r-',lw=3);
rlog[k:].hist(bins=r, density=True, alpha=0.35, color='c')
param = norm.fit(rlog[k:])
plt.plot(r,norm.pdf(r, loc=param[0], scale=param[1]), 'c-',lw=3);
plt.legend(['rLog[:k]', 'rLog[k:]'])
plt.title('Change in Distribution of Log Returns')
norm.fit(rlog[:k].dropna())
```
### Lag Plot of $r^{log}_{t+1}$ versus $r^{log}_t$
```
plt.plot(rlog[0:-1], rlog[1:],'.')
plt.axis('equal');
plt.xlabel('$r^{log}_{t}$')
plt.ylabel('$r^{log}_{t+1}$')
plt.grid()
plt.title('Lag Plot for Log Returns');
```
i.i.d. = independent and identically distributed. A lag plot with no visible structure is consistent with independent returns.
### Autocorrelation
```
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
plot_acf(rlog, lags=min(30, len(rlog)));
plt.xlabel('Lag');
plot_pacf(rlog, lags=min(30, len(rlog)));
plt.xlabel('Lag');
```
## Fitting Returns to a Distribution
### Normal Distribution
```
from scipy.stats import norm
from statsmodels.graphics.gofplots import qqplot
r = np.linspace(rlog.min(), rlog.max())
plt.figure()
param = norm.fit(rlog)
nu = param[0]
sigma = param[1]
print(nu, sigma)
rlog.hist(bins=int(1.5*np.sqrt(len(rlog))), density=True,alpha=0.4)
plt.plot(r, norm.pdf(r, loc=param[0], scale=param[1]), 'r-', lw=3)
plt.figure()
qqplot(rlog, line='q');
```
### Student's T Distribution
```
from scipy.stats import t
from statsmodels.graphics.gofplots import qqplot
r = np.linspace(rlog.min(), rlog.max())
plt.figure()
param = t.fit(rlog)
print(param)
dof = param[0]
nu = param[1]
sigma = param[2]
rlog.hist(bins=int(1.5*np.sqrt(len(rlog))), density=True, alpha=0.4)
#plt.plot(r, t.pdf(r, loc=param[0], scale=param[1]), 'r-', lw=3)
plt.figure()
qqplot(rlog, t, distargs=(4,), loc=nu, scale=sigma, line='q');
```
## Geometric Brownian Motion
The basic notion behind this class of models is to recognize the return at each point in time, for example,
$$\frac{S_{k+1} - S_k}{S_k} = r^{lin}_{k+1}$$
can be expressed as the result of a random process.
$$r^{lin}_{k+1} = \mu\Delta t + \sigma \sqrt{\Delta t}Z_{k+1}$$
where $Z_{k+1}$ comes from a Normal distribution with zero mean and a standard deviation of 1.
### Linear Returns
A discrete-time model for prices modeled as geometric Brownian motion is given by
$$S_{k+1} = S_k + \mu S_k \Delta t + \sigma S_k \sqrt{\Delta t} Z_k$$
where $Z_k \sim N(0,1)$ and $\Delta t$ corresponds to a sampling period, typically a trading period. There are normally 252 trading days in a calendar year, 63 trading days in a quarter, and 21 trading days in a month.
Defining the linear return as
$$r^{lin}_{k} = \frac{S_k - S_{k-1}}{S_{k-1}} = \mu \Delta t + \sigma \sqrt{\Delta t} Z_k$$
then the statistical model for linear returns becomes
$$r^{lin}_{k} = \mu \Delta t + \sigma \sqrt{\Delta t} Z_k$$
This shows, for the case of Geometric Brownian Motion, $r^{lin}_k$ is a random variable drawn from the normal distribution
$$r^{lin}_k \sim N(\mu \Delta t, \sigma\sqrt{\Delta t})$$
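To make the discrete-time model concrete, here is a minimal sketch that generates a single price path from the update rule above; the drift, volatility, and starting price are illustrative values, not fitted ones:
```
np.random.seed(0)
S0 = 100.0
mu = 0.10/252                  # assumed annual drift of 10%, expressed per trading day
sigma = 0.20/np.sqrt(252)      # assumed annual volatility of 20%, expressed per trading day
dt, n_days = 1, 252

path = [S0]
for k in range(n_days):
    Z = np.random.randn()
    path.append(path[-1]*(1 + mu*dt + sigma*np.sqrt(dt)*Z))
print(f'Simulated price after one year: {path[-1]:.2f}')
```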
### Log Returns
Alternatively, geometric Brownian motion for prices can be modeled using the natural logarithm of price,
$$\ln S_{k+1} = \ln S_k + \nu \Delta t + \sigma \sqrt{\Delta t} Z_k$$
where, as for linear returns, $Z_k \sim N(0,1)$ and $\Delta t$ corresponds to a sampling period. The relationship between linear and log returns is given by
$$\nu \approx \mu - \frac{\sigma^2}{2}$$
where $\frac{\sigma^2}{2}$ is the 'volatility drag' on linear returns. Defining log return as
$$r^{log}_k = \ln S_k - \ln S_{k-1} = \nu \Delta t + \sigma \sqrt{\Delta t} Z_k$$
the statistical model for log returns becomes
\begin{align*}
r^{log}_{k} & = \nu \Delta t + \sigma \sqrt{\Delta t} Z_k \\
& \sim N(\nu \Delta t, \sigma\sqrt{\Delta t})
\end{align*}
This shows, for the case of Geometric Brownian Motion, $r^{log}_k$ is a random variable drawn from the normal distribution. The following cells are a complete, self-contained demonstration of downloading a data series, fitting a GBM price model, and performing simulations. The first cell loads a data series, computes linear and log returns, and estimates values for $\mu$, $\nu$, and $\sigma$.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime
from pandas_datareader import data, wb
import requests
from scipy.stats import norm
def get_symbol(symbol):
"""
get_symbol(symbol) uses Yahoo to look up a stock trading symbol and
return a description.
"""
url = "http://d.yimg.com/autoc.finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
result = requests.get(url).json()
for x in result['ResultSet']['Result']:
if x['symbol'] == symbol:
return x['name']
symbol = 'X'
# end date is today
end = datetime.datetime.today().date()
start = end-datetime.timedelta(3*365)
# get stock price data
S = data.DataReader(symbol,"yahoo",start,end)['Adj Close']
rlin = (S - S.shift(1))/S.shift(1)
rlog = np.log(S/S.shift(1))
rlin = rlin.dropna()
rlog = rlog.dropna()
print('Linear Returns')
mu,sigma = norm.fit(rlin)
print(f' mu = {mu:12.8f} (annualized = {100*252*mu:.2f}%)')
print('sigma = {0:12.8f} (annualized = {1:.2f}%)'.format(sigma, 100*np.sqrt(252)*sigma))
print()
print('Log Returns')
nu,sigma = norm.fit(rlog)
print(' nu = {0:12.8f} (annualized = {1:.2f}%)'.format(nu,100*252*nu))
print('sigma = {0:12.8f} (annualized = {1:.2f}%)'.format(sigma,100*np.sqrt(252)*sigma))
```
### Forecasting
The second cell performs $N$ simulations over a time period $T$, and plots the results with the historical data.
```
from scipy.stats import norm
N = 1000
T = 63
dt = 1
plt.figure(figsize=(10,4))
plt.plot(S.values)
plt.title(get_symbol(symbol))
plt.xlabel('Trading Days')
Slog = [] # log of final values
for n in range(0,N):
P = S[-1] # returns the last price in the sequence
k = len(S)
Plog = []
tlog = []
for t in range(len(S)+1,len(S)+T+1):
Z = norm.rvs()
P += P*(mu*dt + sigma*np.sqrt(dt)*Z)
Plog.append(P)
tlog.append(t)
plt.plot(tlog,Plog,'b.',ms=0.4,alpha=0.5)
Slog.append(P)
plt.grid()
from scipy.stats import lognorm
plt.figure(figsize=(10, 4))
nbins = min(100, int(1.5*np.sqrt(N)))
plt.hist(Slog, bins=nbins, density=True, alpha=0.4, color='b');
shape, loc, scale = lognorm.fit(Slog, floc=0)
print(shape, loc, scale)
x=np.linspace(0, max(Slog), 100)
pdf_fitted = lognorm.pdf(x, shape, loc=loc, scale=scale) # fitted distribution
plt.plot(x, pdf_fitted, 'b-', lw=3)
plt.xlabel('Final Price')
plt.ylabel('Probability');
plt.title(get_symbol(symbol))
plt.grid()
```
<!--NAVIGATION-->
< [Measuring Return](http://nbviewer.jupyter.org/github/jckantor/CBE40455/blob/master/notebooks/07.01-Measuring-Return.ipynb) | [Contents](toc.ipynb) | [Binomial Model for Pricing Options](http://nbviewer.jupyter.org/github/jckantor/CBE40455/blob/master/notebooks/07.03-Binomial-Model-for-Pricing-Options.ipynb) ><p><a href="https://colab.research.google.com/github/jckantor/CBE40455/blob/master/notebooks/07.02-Geometric-Brownian-Motion.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a><p><a href="https://raw.githubusercontent.com/jckantor/CBE40455/master/notebooks/07.02-Geometric-Brownian-Motion.ipynb"><img align="left" src="https://img.shields.io/badge/Github-Download-blue.svg" alt="Download" title="Download Notebook"></a>
# Chapter 8 - Foundational Probability Concepts and Their Applications
## 8.1 Randomness, Probability and Random Variables
### 8.1.4 Introduction to simulations with Numpy
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# line to allow the plots to be showed in the Jupyter notebook
%matplotlib inline
def toss_die():
outcome = np.random.randint(1, 7)
return outcome
for x in range(10):
print(toss_die(), end=', ')
np.random.seed(123)
for x in range(10):
print(toss_die(), end=', ')
students = ['student_' + str(i) for i in range(1,31)]
sample_students = np.random.choice(
a=students,
size=4,
replace=False)
sample_students
sample_students2 = np.random.choice(
a=students,
size=12,
replace=True)
for i,s in enumerate(sample_students2):
print(f'Week {i+1}: {s}')
```
### 8.1.5 Probability as a relative frequency
```
np.random.seed(81)
one_million_tosses = np.random.randint(low=1,
high=7,
size=int(1e6))
one_million_tosses[:10]
N_A_occurs = (one_million_tosses == 2).sum()
Prob_A = N_A_occurs/one_million_tosses.shape[0]
print(f'P(A)={Prob_A}')
N_B_occurs = (one_million_tosses == 6).sum()
Prob_B = N_B_occurs/one_million_tosses.shape[0]
print(f'P(B)={Prob_B}')
N_odd_number = (
(one_million_tosses == 1) |
(one_million_tosses == 3) |
(one_million_tosses == 5)).sum()
Prob_C = N_odd_number/one_million_tosses.shape[0]
print(f'P(C)={Prob_C}')
N_D_occurs = (one_million_tosses < 5).sum()
Prob_D = N_D_occurs/one_million_tosses.shape[0]
print(f'P(D)={Prob_D}')
```
### 8.1.7 Defining random variables
```
np.random.seed(55)
number_of_tosses = int(1e5)
die_1 = np.random.randint(1,7, size=number_of_tosses)
die_2 = np.random.randint(1,7, size=number_of_tosses)
X = die_1 + die_2
print(die_1[:10])
print(die_2[:10])
print(X[:10])
Prob_X_is_10 = (X == 10).sum()/X.shape[0]
print(f'P(X = 10) = {Prob_X_is_10}')
Prob_X_is_gt_5 = (X > 5).sum()/X.shape[0]
print(f'P(X > 5) = {Prob_X_is_gt_5}')
X = pd.Series(X)
# counts the occurrences of each value
freq_of_X_values = X.value_counts()
freq_of_X_values.sort_index().plot(kind='bar')
plt.grid();
Prob_of_X_values = freq_of_X_values/X.shape[0]
Prob_of_X_values.sort_index().plot(kind='bar')
plt.grid();
np.random.seed(97)
ten_coins_a_million_times = np.random.randint(0, 2, size=int(10e6)).reshape(-1,10)
ten_coins_a_million_times[:12, :]
Y = ten_coins_a_million_times.sum(axis=1)
Prob_Y_is_0 = (Y == 0).sum() / Y.shape[0]
print((Y == 0).sum())
print(f'P(Y = 0) = {Prob_Y_is_0}')
Y = pd.Series(Y)
# counts the occurrences of each value
freq_of_Y_values = Y.value_counts()
Prob_of_Y_values = freq_of_Y_values/Y.shape[0]
Prob_of_Y_values.sort_index().plot(kind='bar')
plt.grid();
print(Prob_of_Y_values.loc[[4,5,6]])
print(f'P(4<=Y<=6) = {Prob_of_Y_values.loc[[4,5,6]].sum()}')
```
### 8.1.7 Exercise: calculate average wins in the roulette
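One possible approach is sketched below; the exact game and bet are assumptions (a $1 bet on a single number in European roulette, paying 35 to 1), since the exercise statement is not reproduced here:
```
np.random.seed(7)
n_games = int(1e6)

# European roulette wheel: numbers 0-36; bet $1 on a single number (say 17)
spins = np.random.randint(0, 37, size=n_games)
winnings = np.where(spins == 17, 35, -1)   # win $35 if our number comes up, lose $1 otherwise

print(f'Average winnings per game: {winnings.mean():.4f}')   # close to -1/37, about -0.027
```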
## 8.2 Discrete Random Variables
### 8.2.2 Defining discrete random variables
```
np.random.seed(977)
np.random.choice(
['defective', 'good'],
size=12,
p=(0.04, 0.96)
)
np.random.seed(10)
n_boxes = int(1e6)
parts_per_box = 12
one_million_boxes = np.random.choice(
[1, 0],
size=(n_boxes, parts_per_box),
p=(0.04, 0.96)
)
one_million_boxes[:5,:]
# count defective pieces per box
defective_pieces_per_box = one_million_boxes.sum(axis=1)
# count how many times we observed 0, 1, …, 12 defective pieces
defective_pieces_per_box = pd.Series(defective_pieces_per_box)
frequencies = defective_pieces_per_box.value_counts()
# probability distribution
probs_Z = frequencies/n_boxes
print(probs_Z.sort_index())
probs_Z.sort_index().plot(kind='bar')
plt.grid()
```
### 8.2.3 The Binomial distribution
```
import scipy.stats as stats
Y_rv = stats.binom(
n=10, # number of coins
p=0.5 # probability of heads (success)
)
y_values = np.arange(0,11)
Y_probs = Y_rv.pmf(y_values)
fig, ax = plt.subplots()
ax.bar(y_values, Y_probs)
ax.set_xticks(y_values)
ax.grid()
Y_rv_df = pd.DataFrame(
{
'Y_simulated_pmf': Prob_of_Y_values,
'Y_theoretical_pmf': Y_probs
},
index=y_values)
Y_rv_df.plot(kind='bar')
plt.grid();
stats.binom.pmf(k=1, n=12, p=0.04)
```
## 8.3 Continuous Random Variables
### 8.3.2 Defining continuous random variables
```
games = pd.read_csv('./data/appstore_games.csv')
original_colums_dict = {x: x.lower().replace(' ','_') for x in games.columns}
games.rename(
columns = original_colums_dict,
inplace = True
)
games['size'] = games['size']/(1e6)
# replacing the one missing value with the median
games['size'] = games['size'].fillna(games['size'].median())
games['size'].hist(bins = 50, ec='k');
# get the number of games to use as denominator
number_of_games = games['size'].size
# calculate probabilities
prob_X_gt_100 = (games['size'] > 100).sum()/number_of_games
prob_X_bt_100_and_400 = ((games['size'] >= 100) & (games['size'] <= 400)).sum()/number_of_games
prob_X_eq_152_53 = (games['size'] == 152.53).sum()/number_of_games
# print the results
print(f'P(X > 100) = {prob_X_gt_100:0.5f}')
print(f'P(100 <= X <= 400) = {prob_X_bt_100_and_400:0.5f}')
print(f'P(X = 152.53) = {prob_X_eq_152_53:0.5f}')
```
### 8.3.3 The Normal distribution
```
# set the mu and sigma parameters of the distribution
heights_mean = 170
heights_sd = 10
# instantiate the random variable object
heights_rv = stats.norm(
loc = heights_mean, # mean of the distribution
scale = heights_sd # standard deviation
)
heights_rv.pdf(175)
values = np.linspace(130, 210, num=200)
heights_rv_pdf = heights_rv.pdf(values)
plt.plot(values, heights_rv_pdf)
plt.grid();
sample_heights = heights_rv.rvs(
size = 5,
random_state = 998 # similar to np.seed
)
for i, h in enumerate(sample_heights):
print(f'Men {i + 1} height: {h:0.1f}')
# size of the simulation
sim_size = int(1e5)
# simulate the random samples
sample_heights = heights_rv.rvs(
    size = sim_size,
    random_state = 88 # similar to np.seed
)
Prob_event = (sample_heights > 190).sum()/sim_size
print(f'Probability of a male > 190 cm : {Prob_event:0.5f} (or {100*Prob_event:0.2f}%)')
```
### 8.3.4 Some properties of the Normal distribution
```
# parameters of distributions
heights_means = [170, 170, 175]
heights_sds = [10, 5, 10]
countries = ['Country A', 'Country B', 'Country C']
heights_rvs = {}
plotting_values = {}
# creating the random variables
for i, country in enumerate(countries):
heights_rvs[country] = stats.norm(
loc = heights_means[i], # mean of the distribution
scale = heights_sds[i] # standard deviation
)
# getting x and y values for plotting the distributions
for i, country in enumerate(countries):
x_values = np.linspace(heights_means[i] - 4*heights_sds[i], heights_means[i] + 4*heights_sds[i])
y_values = heights_rvs[country].pdf(x_values)
plotting_values[country] = (x_values, y_values)
# plotting the three distributions
fig, ax = plt.subplots(figsize = (8, 4))
for i, country in enumerate(countries):
ax.plot(
plotting_values[country][0],
plotting_values[country][1],
label=country,
lw = 2
)
ax.set_xticks(np.arange(130, 220, 5))
plt.legend()
plt.grid();
from matplotlib.patches import Polygon
def func(x):
return heights_rv.pdf(x)
lower_lim = 160
upper_lim = 180
x = np.linspace(130, 210)
y = func(x)
fig, ax = plt.subplots(figsize=(8,4))
ax.plot(x, y, 'blue', linewidth=2)
ax.set_ylim(bottom=0)
# Make the shaded region
ix = np.linspace(lower_lim, upper_lim)
iy = func(ix)
verts = [(lower_lim, 0), *zip(ix, iy), (upper_lim, 0)]
poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')
ax.add_patch(poly)
ax.text(0.5 * (lower_lim + upper_lim), 0.01, r"$\int_{160}^{180} f(x)\mathrm{d}x\approx0.68$",
horizontalalignment='center', fontsize=12)
fig.text(0.85, 0.05, '$height$')
fig.text(0.08, 0.85, '$f(x)$')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks((lower_lim, upper_lim))
ax.set_xticklabels(('$160$', '$180$'))
ax.set_yticks([]);
# limits of the integral
lower_lim = 160
upper_lim = 180
# calculating the area under the curve
Prob_X_in_160_180 = heights_rv.cdf(upper_lim) - heights_rv.cdf(lower_lim)
# print the result
print(f'Prob(160 <= X <= 180) = {Prob_X_in_160_180:0.4f}')
# limits of the integral
lower_lim = 190
upper_lim = np.Inf
# calculating the area under the curve
Prob_X_gt_190 = heights_rv.cdf(upper_lim) - heights_rv.cdf(lower_lim)
# print the result
print(f'Probability of a male > 190 cm : {Prob_X_gt_190:0.5f} (or {100*Prob_X_gt_190:0.2f}%)')
```
### 8.3.5 Exercise: using the normal distribution in education
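A sketch of one way such an exercise might be set up (the mean, standard deviation, and cutoffs below are assumptions, not values from the book): model exam scores as normally distributed and use the cdf to obtain proportions:
```
# Assumed parameters: exam scores ~ Normal(mean=100, sd=15)
scores_rv = stats.norm(loc=100, scale=15)

prop_above_85 = 1 - scores_rv.cdf(85)
prop_between_90_110 = scores_rv.cdf(110) - scores_rv.cdf(90)

print(f'Proportion scoring above 85: {prop_above_85:0.4f}')
print(f'Proportion scoring between 90 and 110: {prop_between_90_110:0.4f}')
```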
<a href="https://colab.research.google.com/github/qls0ulp/30-seconds-of-python/blob/master/Generating_Piano_Music_with_Transformer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
##### Copyright 2019 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
# Generating Piano Music with Transformer
### ___Ian Simon, Anna Huang, Jesse Engel, Curtis "Fjord" Hawthorne___
This Colab notebook lets you play with pretrained [Transformer](https://arxiv.org/abs/1706.03762) models for piano music generation, based on the [Music Transformer](http://g.co/magenta/music-transformer) model introduced by [Huang et al.](https://arxiv.org/abs/1809.04281) in 2018.
The models used here were trained on over 10,000 hours of piano recordings from YouTube, transcribed using [Onsets and Frames](http://g.co/magenta/onsets-frames) and represented using the event vocabulary from [Performance RNN](http://g.co/magenta/performance-rnn).
Unlike the original Music Transformer paper, this notebook uses attention based on absolute instead of relative position; we may add models that use relative attention at some point in the future.
# Environment Setup
```
#@title Setup Environment
#@markdown Copy model checkpoints and some auxiliary data from
#@markdown Google Cloud Storage. Also install and import
#@markdown Python dependencies needed for running the
#@markdown Transformer models.
#@markdown
#@markdown This cell may take a few minutes to run.
print('Copying checkpoints and Salamander piano SoundFont (via https://sites.google.com/site/soundfonts4u) from GCS...')
!gsutil -q -m cp -r gs://magentadata/models/music_transformer/* /content/
!gsutil -q -m cp gs://magentadata/soundfonts/Yamaha-C5-Salamander-JNv5.1.sf2 /content/
print('Installing dependencies...')
!apt-get update -qq && apt-get install -qq libfluidsynth1 build-essential libasound2-dev libjack-dev
!pip install -qU google-cloud magenta pyfluidsynth
import ctypes.util
def proxy_find_library(lib):
if lib == 'fluidsynth':
return 'libfluidsynth.so.1'
else:
return ctypes.util.find_library(lib)
ctypes.util.find_library = proxy_find_library
print('Importing libraries...')
import numpy as np
import os
import tensorflow as tf
from google.colab import files
from tensor2tensor import models
from tensor2tensor import problems
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import decoding
from tensor2tensor.utils import trainer_lib
import magenta.music as mm
from magenta.models.score2perf import score2perf
print('Done!')
#@title Definitions
#@markdown Define a few constants and helper functions.
SF2_PATH = '/content/Yamaha-C5-Salamander-JNv5.1.sf2'
SAMPLE_RATE = 16000
# Upload a MIDI file and convert to NoteSequence.
def upload_midi():
data = list(files.upload().values())
if len(data) > 1:
print('Multiple files uploaded; using only one.')
return mm.midi_to_note_sequence(data[0])
# Decode a list of IDs.
def decode(ids, encoder):
ids = list(ids)
if text_encoder.EOS_ID in ids:
ids = ids[:ids.index(text_encoder.EOS_ID)]
return encoder.decode(ids)
```
# Piano Performance Language Model
```
#@title Setup and Load Checkpoint
#@markdown Set up generation from an unconditional Transformer
#@markdown model.
model_name = 'transformer'
hparams_set = 'transformer_tpu'
ckpt_path = '/content/checkpoints/unconditional_model_16.ckpt'
class PianoPerformanceLanguageModelProblem(score2perf.Score2PerfProblem):
@property
def add_eos_symbol(self):
return True
problem = PianoPerformanceLanguageModelProblem()
unconditional_encoders = problem.get_feature_encoders()
# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = 16
hparams.sampling_method = 'random'
# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = 0.0
decode_hparams.beam_size = 1
# Create Estimator.
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
model_name, hparams, run_config,
decode_hparams=decode_hparams)
# Create input generator (so we can adjust priming and
# decode length on the fly).
def input_generator():
global targets
global decode_length
while True:
yield {
'targets': np.array([targets], dtype=np.int32),
'decode_length': np.array(decode_length, dtype=np.int32)
}
# These values will be changed by subsequent cells.
targets = []
decode_length = 0
# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
unconditional_samples = estimator.predict(
input_fn, checkpoint_path=ckpt_path)
# "Burn" one.
_ = next(unconditional_samples)
#@title Generate from Scratch
#@markdown Generate a piano performance from scratch.
#@markdown
#@markdown This can take a minute or so depending on the length
#@markdown of the performance the model ends up generating.
#@markdown Because we use a
#@markdown [representation](http://g.co/magenta/performance-rnn)
#@markdown where each event corresponds to a variable amount of
#@markdown time, the actual number of seconds generated may vary.
targets = []
decode_length = 1024
# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=unconditional_encoders['targets'])
unconditional_ns = mm.midi_file_to_note_sequence(midi_filename)
# Play and plot.
mm.play_sequence(
unconditional_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(unconditional_ns)
#@title Download Performance as MIDI
#@markdown Download generated performance as MIDI (optional).
mm.sequence_proto_to_midi_file(
unconditional_ns, '/tmp/unconditional.mid')
files.download('/tmp/unconditional.mid')
#@title Choose Priming Sequence
#@markdown Here you can choose a priming sequence to be continued
#@markdown by the model. We have provided a few, or you can
#@markdown upload your own MIDI file.
#@markdown
#@markdown Set `max_primer_seconds` below to trim the primer to a
#@markdown fixed number of seconds (this will have no effect if
#@markdown the primer is already shorter than `max_primer_seconds`).
filenames = {
'C major arpeggio': '/content/primers/c_major_arpeggio.mid',
'C major scale': '/content/primers/c_major_scale.mid',
'Clair de Lune': '/content/primers/clair_de_lune.mid',
}
primer = 'Upload your own!' #@param ['C major arpeggio', 'C major scale', 'Clair de Lune', 'Upload your own!']
if primer == 'Upload your own!':
primer_ns = upload_midi()
else:
# Use one of the provided primers.
primer_ns = mm.midi_file_to_note_sequence(filenames[primer])
# Handle sustain pedal in the primer.
primer_ns = mm.apply_sustain_control_changes(primer_ns)
# Trim to desired number of seconds.
max_primer_seconds = 20 #@param {type:"slider", min:1, max:120}
if primer_ns.total_time > max_primer_seconds:
print('Primer is longer than %d seconds, truncating.' % max_primer_seconds)
primer_ns = mm.extract_subsequence(
primer_ns, 0, max_primer_seconds)
# Remove drums from primer if present.
if any(note.is_drum for note in primer_ns.notes):
print('Primer contains drums; they will be removed.')
notes = [note for note in primer_ns.notes if not note.is_drum]
del primer_ns.notes[:]
primer_ns.notes.extend(notes)
# Set primer instrument and program.
for note in primer_ns.notes:
note.instrument = 1
note.program = 0
# Play and plot the primer.
mm.play_sequence(
primer_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(primer_ns)
#@title Generate Continuation
#@markdown Continue a piano performance, starting with the
#@markdown chosen priming sequence.
targets = unconditional_encoders['targets'].encode_note_sequence(
primer_ns)
# Remove the end token from the encoded primer.
targets = targets[:-1]
decode_length = max(0, 4096 - len(targets))
if len(targets) >= 4096:
print('Primer has more events than maximum sequence length; nothing will be generated.')
# Generate sample events.
sample_ids = next(unconditional_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=unconditional_encoders['targets'])
ns = mm.midi_file_to_note_sequence(midi_filename)
# Append continuation to primer.
continuation_ns = mm.concatenate_sequences([primer_ns, ns])
# Play and plot.
mm.play_sequence(
continuation_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(continuation_ns)
#@title Download Continuation as MIDI
#@markdown Download performance (primer + generated continuation)
#@markdown as MIDI (optional).
mm.sequence_proto_to_midi_file(
continuation_ns, '/tmp/continuation.mid')
files.download('/tmp/continuation.mid')
```
# Melody-Conditioned Piano Performance Model
```
#@title Setup and Load Checkpoint
#@markdown Set up generation from a melody-conditioned
#@markdown Transformer model.
model_name = 'transformer'
hparams_set = 'transformer_tpu'
ckpt_path = '/content/checkpoints/melody_conditioned_model_16.ckpt'
class MelodyToPianoPerformanceProblem(score2perf.AbsoluteMelody2PerfProblem):
@property
def add_eos_symbol(self):
return True
problem = MelodyToPianoPerformanceProblem()
melody_conditioned_encoders = problem.get_feature_encoders()
# Set up HParams.
hparams = trainer_lib.create_hparams(hparams_set=hparams_set)
trainer_lib.add_problem_hparams(hparams, problem)
hparams.num_hidden_layers = 16
hparams.sampling_method = 'random'
# Set up decoding HParams.
decode_hparams = decoding.decode_hparams()
decode_hparams.alpha = 0.0
decode_hparams.beam_size = 1
# Create Estimator.
run_config = trainer_lib.create_run_config(hparams)
estimator = trainer_lib.create_estimator(
model_name, hparams, run_config,
decode_hparams=decode_hparams)
# These values will be changed by the following cell.
inputs = []
decode_length = 0
# Create input generator.
def input_generator():
global inputs
while True:
yield {
'inputs': np.array([[inputs]], dtype=np.int32),
'targets': np.zeros([1, 0], dtype=np.int32),
'decode_length': np.array(decode_length, dtype=np.int32)
}
# Start the Estimator, loading from the specified checkpoint.
input_fn = decoding.make_input_fn_from_generator(input_generator())
melody_conditioned_samples = estimator.predict(
input_fn, checkpoint_path=ckpt_path)
# "Burn" one.
_ = next(melody_conditioned_samples)
#@title Choose Melody
#@markdown Here you can choose a melody to be accompanied by the
#@markdown model. We have provided a few, or you can upload a
#@markdown MIDI file; if your MIDI file is polyphonic, the notes
#@markdown with highest pitch will be used as the melody.
# Tokens to insert between melody events.
event_padding = 2 * [mm.MELODY_NO_EVENT]
melodies = {
'Mary Had a Little Lamb': [
64, 62, 60, 62, 64, 64, 64, mm.MELODY_NO_EVENT,
62, 62, 62, mm.MELODY_NO_EVENT,
64, 67, 67, mm.MELODY_NO_EVENT,
64, 62, 60, 62, 64, 64, 64, 64,
62, 62, 64, 62, 60, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT
],
'Row Row Row Your Boat': [
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
60, mm.MELODY_NO_EVENT, 62,
64, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
64, mm.MELODY_NO_EVENT, 62,
64, mm.MELODY_NO_EVENT, 65,
67, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
72, 72, 72, 67, 67, 67, 64, 64, 64, 60, 60, 60,
67, mm.MELODY_NO_EVENT, 65,
64, mm.MELODY_NO_EVENT, 62,
60, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT,
mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT, mm.MELODY_NO_EVENT
],
'Twinkle Twinkle Little Star': [
60, 60, 67, 67, 69, 69, 67, mm.MELODY_NO_EVENT,
65, 65, 64, 64, 62, 62, 60, mm.MELODY_NO_EVENT,
67, 67, 65, 65, 64, 64, 62, mm.MELODY_NO_EVENT,
67, 67, 65, 65, 64, 64, 62, mm.MELODY_NO_EVENT,
60, 60, 67, 67, 69, 69, 67, mm.MELODY_NO_EVENT,
65, 65, 64, 64, 62, 62, 60, mm.MELODY_NO_EVENT
]
}
melody = 'Twinkle Twinkle Little Star' #@param ['Mary Had a Little Lamb', 'Row Row Row Your Boat', 'Twinkle Twinkle Little Star', 'Upload your own!']
if melody == 'Upload your own!':
# Extract melody from user-uploaded MIDI file.
melody_ns = upload_midi()
melody_instrument = mm.infer_melody_for_sequence(melody_ns)
notes = [note for note in melody_ns.notes
if note.instrument == melody_instrument]
del melody_ns.notes[:]
melody_ns.notes.extend(
sorted(notes, key=lambda note: note.start_time))
for i in range(len(melody_ns.notes) - 1):
melody_ns.notes[i].end_time = melody_ns.notes[i + 1].start_time
inputs = melody_conditioned_encoders['inputs'].encode_note_sequence(
melody_ns)
else:
# Use one of the provided melodies.
events = [event + 12 if event != mm.MELODY_NO_EVENT else event
for e in melodies[melody]
for event in [e] + event_padding]
inputs = melody_conditioned_encoders['inputs'].encode(
' '.join(str(e) for e in events))
melody_ns = mm.Melody(events).to_sequence(qpm=150)
# Play and plot the melody.
mm.play_sequence(
melody_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(melody_ns)
#@title Generate Accompaniment for Melody
#@markdown Generate a piano performance consisting of the chosen
#@markdown melody plus accompaniment.
# Generate sample events.
decode_length = 4096
sample_ids = next(melody_conditioned_samples)['outputs']
# Decode to NoteSequence.
midi_filename = decode(
sample_ids,
encoder=melody_conditioned_encoders['targets'])
accompaniment_ns = mm.midi_file_to_note_sequence(midi_filename)
# Play and plot.
mm.play_sequence(
accompaniment_ns,
synth=mm.fluidsynth, sample_rate=SAMPLE_RATE, sf2_path=SF2_PATH)
mm.plot_sequence(accompaniment_ns)
#@title Download Accompaniment as MIDI
#@markdown Download accompaniment performance as MIDI (optional).
mm.sequence_proto_to_midi_file(
accompaniment_ns, '/tmp/accompaniment.mid')
files.download('/tmp/accompaniment.mid')
```
```
!pip install folium
%matplotlib inline
%%html
<link rel="import" href="urth_components/paper-dropdown-menu/paper-dropdown-menu.html" is='urth-core-import' package='PolymerElements/paper-dropdown-menu'>
<link rel="import" href="urth_components/paper-menu/paper-menu.html" is='urth-core-import' package='PolymerElements/paper-menu'>
<link rel="import" href="urth_components/paper-item/paper-item.html" is='urth-core-import' package='PolymerElements/paper-item'>
<link rel="import" href="urth_components/paper-button/paper-button.html" is='urth-core-import' package='PolymerElements/paper-button'>
<link rel="import" href="urth_components/paper-card/paper-card.html" is='urth-core-import' package='PolymerElements/paper-card'>
<link rel="import" href="urth_components/paper-slider/paper-slider.html" is='urth-core-import' package='PolymerElements/paper-slider'>
<link rel="import" href="urth_components/google-map/google-map.html" is='urth-core-import' package='GoogleWebComponents/google-map'>
<link rel="import" href="urth_components/google-map/google-map-marker.html" is='urth-core-import' package='GoogleWebComponents/google-map'>
<link rel="import" href="urth_components/urth-viz-table/urth-viz-table.html" is='urth-core-import'>
<link rel="import" href="urth_components/urth-viz-chart/urth-viz-chart.html" is='urth-core-import'>
<link rel="stylesheet" href="http://cdn.leafletjs.com/leaflet-0.5/leaflet.css" />
<link rel="import" href="./urth-raw-html.html">
import os
import struct
import glob
import pandas as pd
import datetime as dt
# Use this global variable to specify the path for station summary files.
NOAA_STATION_SUMMARY_PATH = "/home/jovyan/work/noaa/data/"
# Use this global variable to specify the path for the GHCND Station Directory
STATION_DETAIL_FILE = '/home/jovyan/work/noaa/data/ghcnd-stations.txt'
# Station detail structures for building station lists
station_detail_colnames = ['StationID','State','Name',
'Latitude','Longitude','QueryTag']
station_detail_rec_template = {'StationID': "",
'State': "",
'Name': "",
'Latitude': "",
'Longitude': "",
'QueryTag': ""
}
# -----------------------------------
# Station Detail Processing
# -----------------------------------
def get_filename(pathname):
'''Fetch filename portion of pathname.'''
plist = pathname.split('/')
fname, fext = os.path.splitext(plist[len(plist)-1])
return fname
def fetch_station_list():
'''Return list of available stations given collection of summary files on disk.'''
station_list = []
raw_files = os.path.join(NOAA_STATION_SUMMARY_PATH,'','*_sum.csv')
for index, fname in enumerate(glob.glob(raw_files)):
f = get_filename(fname).split('_')[0]
station_list.append(str(f))
return station_list
USA_STATION_LIST = fetch_station_list()
def gather_states(fname,stations):
'''Return a list of unique State abbreviations. Weather station data exists for these states.'''
state_list = []
with open(fname, 'r', encoding='utf-8') as f:
lines = f.readlines()
f.close()
for line in lines:
r = noaa_gather_station_detail(line,stations)
state_list += r
    df_unique_states = pd.DataFrame(state_list, columns=station_detail_colnames).sort_values('State').State.unique()  # sort_values replaces the removed DataFrame.sort
return df_unique_states.tolist()
def noaa_gather_station_detail(line,slist):
'''Build a list of station tuples for stations in the USA.'''
station_tuple_list = []
station_id_key = line[0:3]
if station_id_key == 'USC' or station_id_key == 'USW':
fields = struct.unpack('12s9s10s7s2s30s', line[0:70].encode())
if fields[0].decode().strip() in slist:
station_tuple = dict(station_detail_rec_template)
station_tuple['StationID'] = fields[0].decode().strip()
station_tuple['State'] = fields[4].decode().strip()
station_tuple['Name'] = fields[5].decode().strip()
station_tuple['Latitude'] = fields[1].decode().strip()
station_tuple['Longitude'] = fields[2].decode().strip()
qt = "{0} at {1} in {2}".format(fields[0].decode().strip(),fields[5].decode().strip(),fields[4].decode().strip())
station_tuple['QueryTag'] = qt
station_tuple_list.append(station_tuple)
return station_tuple_list
USA_STATES_WITH_STATIONS = gather_states(STATION_DETAIL_FILE,USA_STATION_LIST)
def process_station_detail_for_state(fname,stations,statecode):
'''Return dataframe of station detail for specified state.'''
station_list = []
with open(fname, 'r', encoding='utf-8') as f:
lines = f.readlines()
f.close()
for line in lines:
r = noaa_build_station_detail_for_state(line,stations,statecode)
station_list += r
return pd.DataFrame(station_list,columns=station_detail_colnames)
def noaa_build_station_detail_for_state(line,slist,statecode):
'''Build a list of station tuples for the specified state in the USA.'''
station_tuple_list = []
station_id_key = line[0:3]
if station_id_key == 'USC' or station_id_key == 'USW':
fields = struct.unpack('12s9s10s7s2s30s', line[0:70].encode())
if ((fields[0].decode().strip() in slist) and (fields[4].decode().strip() == statecode)):
station_tuple = dict(station_detail_rec_template)
station_tuple['StationID'] = fields[0].decode().strip()
station_tuple['State'] = fields[4].decode().strip()
station_tuple['Name'] = fields[5].decode().strip()
station_tuple['Latitude'] = fields[1].decode().strip()
station_tuple['Longitude'] = fields[2].decode().strip()
qt = "Station {0} in {1} at {2}".format(fields[0].decode().strip(),fields[4].decode().strip(),fields[5].decode().strip())
station_tuple['QueryTag'] = qt
station_tuple_list.append(station_tuple)
return station_tuple_list
df = process_station_detail_for_state(STATION_DETAIL_FILE,USA_STATION_LIST,"NY")
df.tail()
import numpy as np
import folium
from IPython.display import HTML
def display_map(m, height=500):
"""Takes a folium instance and embed HTML."""
m._build_map()
    srcdoc = m.HTML.replace('"', '&quot;')
embed = '<iframe srcdoc="{0}" style="width: 100%; height: {1}px; border: none"></iframe>'.format(srcdoc, height)
return embed
def render_map(df,height=500):
centerpoint_latitude = np.mean(df.Latitude.astype(float))
centerpoint_longitude = np.mean(df.Longitude.astype(float))
map_obj = folium.Map(location=[centerpoint_latitude, centerpoint_longitude],zoom_start=6)
for index, row in df.iterrows():
map_obj.simple_marker([row.Latitude, row.Longitude], popup=row.QueryTag)
    return display_map(map_obj, height)
map_doc = render_map(df)
from urth.widgets.widget_channels import channel
channel("noaaquery").set("theMap", map_doc)
%%html
<template id="narrationContent" is="urth-core-bind" channel="noaaquery">
<div id="map">
<urth-raw-html html="{{theMap}}"/>
</div>
</template>
map_doc
```
# Azure Kubernetes Service (AKS) Deep MNIST
In this example we will deploy a tensorflow MNIST model in the Azure Kubernetes Service (AKS).
This tutorial breaks down into the following sections:
1) Train a tensorflow model to predict mnist locally
2) Containerise the tensorflow model with our docker utility
3) Send some data to the docker model to test it
4) Install and configure Azure tools to interact with your cluster
5) Use the Azure tools to create and setup AKS cluster with Seldon
6) Push and run docker image through the Azure Container Registry
7) Test our Azure Kubernetes deployment by sending some data
#### Let's get started! 🚀🔥
## Dependencies:
* Helm v3.0.0+
* A Kubernetes cluster running v1.13 or above (minikube / docker-for-windows work well if given enough RAM)
* kubectl v1.14+
* az CLI v2.0.66+
* Python 3.6+
* Python DEV requirements
## 1) Train a tensorflow model to predict mnist locally
We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels
```
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)
import tensorflow as tf
if __name__ == '__main__':
x = tf.placeholder(tf.float32, [None,784], name="x")
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x,W) + b, name="y")
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict = {x: mnist.test.images, y_:mnist.test.labels}))
saver = tf.train.Saver()
saver.save(sess, "model/deep_mnist_model")
```
## 2) Containerise the tensorflow model with our docker utility
First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content:
```
!cat .s2i/environment
```
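If the file does not exist yet, it can be created from the notebook. The sketch below shows the usual contents for the Seldon Python wrapper; the values are assumptions rather than a copy of this repository's file (in particular, `MODEL_NAME` must match the Python class that implements your model's `predict` method):
```
%%writefile .s2i/environment
MODEL_NAME=DeepMnist
API_TYPE=REST
SERVICE_TYPE=MODEL
PERSISTENCE=0
```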
Now we can build a docker image named "deep-mnist" with the tag 0.1
```
!s2i build . seldonio/seldon-core-s2i-python36:1.2.3-dev deep-mnist:0.1
```
## 3) Send some data to the docker model to test it
We first run the docker image we just created as a container called "mnist_predictor"
```
!docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1
```
Send some random features that conform to the contract
```
import matplotlib.pyplot as plt
import numpy as np
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
# We now test the REST endpoint expecting the same result
endpoint = "0.0.0.0:5000"
batch = x
payload_type = "ndarray"
sc = SeldonClient(microservice_endpoint=endpoint)
# We use the microservice, instead of the "predict" function
client_prediction = sc.microservice(
data=batch,
method="predict",
payload_type=payload_type,
names=["tfidf"])
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
!docker rm mnist_predictor --force
```
## 4) Install and configure Azure tools
First we install the azure cli - follow specific instructions at https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest
```
!curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
```
#### Configure the azure CLI so it can talk to your server
(if you are getting issues, make sure you have the permissions to create clusters)
You must run this through a terminal and follow the instructions:
```
az login
```
Once you are logged in, we can create our cluster. Run the following command; it may take a while, so feel free to get a ☕.
```
%%bash
# We'll create a resource group
az group create --name SeldonResourceGroup --location westus
# Now we create the cluster
az aks create \
--resource-group SeldonResourceGroup \
--name SeldonCluster \
--node-count 1 \
--enable-addons monitoring \
    --generate-ssh-keys \
--kubernetes-version 1.13.5
```
Once it's created we can authenticate our local `kubectl` to make sure we can talk to the azure cluster:
```
!az aks get-credentials --resource-group SeldonResourceGroup --name SeldonCluster
```
And now we can check that this has been successful by making sure that our `kubectl` context is pointing to the cluster:
```
!kubectl config get-contexts
```
## Setup Seldon Core
Use the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html).
## Push docker image
In order for the AKS Seldon deployment to access the image we just built, we need to push it to the Azure Container Registry (ACR) - you can check if it's been successfully created in the dashboard https://portal.azure.com/#blade/HubsExtension/BrowseResourceBlade/resourceType/Microsoft.ContainerRegistry%2Fregistries
If you have any issues please follow the official Azure documentation: https://docs.microsoft.com/en-us/azure/container-registry/container-registry-get-started-azure-cli
### First we create a registry
Make sure you keep the `loginServer` value in the output dictionary as we'll use it below.
```
!az acr create --resource-group SeldonResourceGroup --name SeldonContainerRegistry --sku Basic
```
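If you didn't note the `loginServer` down, it can be queried again at any time; a small sketch, assuming the registry name used above:
```
!az acr show --name SeldonContainerRegistry --query loginServer --output tsv
```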
### Make sure your local docker instance has access to the registry
```
!az acr login --name SeldonContainerRegistry
```
### Now prepare docker image
We need to first tag the docker image before we can push it.
NOTE: if you named your registry differently, make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker tag deep-mnist:0.1 seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
### And push the image
NOTE: if you named your registry differently, make sure you change the value of `seldoncontainerregistry.azurecr.io`
```
!docker push seldoncontainerregistry.azurecr.io/deep-mnist:0.1
```
## Running the Model
We will now run the model. As you can see we have a placeholder `"REPLACE_FOR_IMAGE_AND_TAG"`, which we'll replace to point to our registry.
Let's first have a look at the file we'll be using to trigger the model:
```
!cat deep_mnist.json
```
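For orientation, the file is a `SeldonDeployment` custom resource. The sketch below shows roughly the shape such a manifest takes, written as a Python dict so it can be inspected in the notebook; treat the field values as illustrative assumptions rather than a copy of the real `deep_mnist.json`:
```
import json

# Rough sketch of a minimal SeldonDeployment manifest (illustrative values only)
deep_mnist_sketch = {
    "apiVersion": "machinelearning.seldon.io/v1alpha2",
    "kind": "SeldonDeployment",
    "metadata": {"name": "deep-mnist"},
    "spec": {
        "name": "deep-mnist",
        "predictors": [{
            "name": "single-model",
            "replicas": 1,
            "componentSpecs": [{
                "spec": {
                    "containers": [{
                        "name": "classifier",
                        "image": "REPLACE_FOR_IMAGE_AND_TAG"
                    }]
                }
            }],
            "graph": {
                "name": "classifier",
                "type": "MODEL",
                "endpoint": {"type": "REST"},
                "children": []
            }
        }]
    }
}
print(json.dumps(deep_mnist_sketch, indent=2))
```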
Now let's trigger seldon to run the model.
### Run the deployment in your cluster
NOTE: In order for this to work you need to make sure that your cluster has permission to pull the images. You can do this through the Azure portal as follows (a CLI alternative is sketched right after this list):
1) Go into the Azure Container Registry
2) Select the SeldonContainerRegistry you created
3) Click on "Add a role assignment"
4) Select the AcrPull role
5) Select the service principal
6) Find the SeldonCluster
7) Wait until the role has been added
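Alternatively, newer versions of the az CLI can grant the AcrPull role in a single step; a sketch, assuming the cluster and registry names used earlier in this notebook:
```
!az aks update --resource-group SeldonResourceGroup --name SeldonCluster --attach-acr SeldonContainerRegistry
```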
We basically have a JSON manifest (`deep_mnist.json`) in which we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" with the image you pushed
```
%%bash
# Change accordingly if your registry is called differently
sed 's|REPLACE_FOR_IMAGE_AND_TAG|seldoncontainerregistry.azurecr.io/deep-mnist:0.1|g' deep_mnist.json | kubectl apply -f -
```
And let's check that it's been created.
You should see a pod whose name starts with "deep-mnist-single-model...".
We'll wait until STATUS changes from "ContainerCreating" to "Running"
```
!kubectl get pods
```
## Test the model
Now we can test the model. Let's first find out the URL we'll have to use:
```
!kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```
We'll use a random example from our dataset
```
import matplotlib.pyplot as plt
# This is the variable that was initialised at the beginning of the file
i = [0]
x = mnist.test.images[i]
y = mnist.test.labels[i]
plt.imshow(x.reshape((28, 28)), cmap='gray')
plt.show()
print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y)
```
We can now add the URL above to send our request:
```
from seldon_core.seldon_client import SeldonClient
import math
import numpy as np
host = "52.160.64.65"
port = "80" # Make sure you use the port above
batch = x
payload_type = "ndarray"
sc = SeldonClient(
gateway="ambassador",
ambassador_endpoint=host + ":" + port,
namespace="default",
oauth_key="oauth-key",
oauth_secret="oauth-secret")
client_prediction = sc.predict(
data=batch,
deployment_name="deep-mnist",
names=["text"],
payload_type=payload_type)
print(client_prediction)
```
### Let's visualise the probability for each label
It seems that it correctly predicted the number 7
```
for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)):
print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %")
```
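If you only want the predicted digit rather than the full distribution, the class with the highest probability can be pulled out of the same response object; a small sketch reusing the access pattern from the loop above:
```
# Collect the ten class probabilities and report the argmax as the predicted digit
probs = [v.number_value for v in
         client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1]]
print("Predicted digit:", int(np.argmax(probs)))
```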
# UTS Deep Learning
**Raymoond/2301872381**
# Import Library and Dataset
Here I import all of the libraries and the dataset that will be used later in the code.
```
pip install keras-tuner --upgrade
import numpy as np
import pandas as pd
import keras_tuner as kt
import keras
import tensorflow as tf
import seaborn as sns
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense, Dropout
from kerastuner.tuners import RandomSearch
```
First we import all the functions and libraries that we need.
```
#Import Data
train_data = pd.read_csv("train.csv")
x_data = train_data[['age','fnlwgt','educational-num','capital-gain','capital-loss','hours-per-week']]
y_data = train_data[['income_>50K']]
train_data.describe()
```
Here we load the data into `train_data`; I only keep the columns whose values are integers.
# Preprocess & Split Data
Here I preprocess the data with a scaler and categorical (one-hot) encoding, and then split it into 80% training and 20% test data.
```
#Preprocessing
scaler = StandardScaler()
x_data = scaler.fit_transform(x_data)
from tensorflow.keras.utils import to_categorical
y_data = to_categorical(y_data)
```
The data is then preprocessed: the x data is standardized with the scaler and the y data is one-hot encoded with the categorical encoder.
```
#split data
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size = 0.2, random_state=1, stratify=y_data)
```
The data is then split 80% / 20% before being fed into the model.
# Keras Model
Here I build a Keras model to measure its accuracy.
```
#Keras Model 1
model = Sequential()
model.add(Dense(32, input_dim=6, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, batch_size=10)
```
The preprocessed data is fed into a Keras model with binary cross-entropy loss and the Adam optimizer, which lets us measure the model's accuracy.
# Tuning
Here I run hyperparameter tuning to adjust the layers as the assignment requires, i.e. (n, 2, 1).
```
def build_model(hp):
model = keras.Sequential()
for i in range(hp.Int('num_layers',2,5)):
model.add(layers.Dense(units=hp.Int('units_',
min_value = 32,
max_value = 512,
step = 64),
activation = 'relu'))
model.add(layers.Dense(2, activation ='sigmoid'))
model.compile(
optimizer = tf.keras.optimizers.Adam(
hp.Choice('learning_rate',
values = [1e-2, 1e-3, 1e-4])),
loss = 'binary_crossentropy',
metrics=['accuracy'])
return model
tuner = kt.RandomSearch(
build_model,
objective='val_accuracy',
max_trials=5,
executions_per_trial=3,
directory='project',
project_name='classification')
tuner.search(x_train,y_train,
epochs=5,
validation_data=(x_test,y_test))
```
Here we run the tuning as the assignment requires, so that the layers become (n, 2, 1); the result will be compared with the model above.
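To see which configuration the search settled on (the values that Keras Model 2 below is based on), the tuner can be queried directly; a minimal sketch, assuming the `tuner` from the cell above has finished searching:
```
# Best trial found by the random search above
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print('hidden layers  :', best_hps.get('num_layers'))
print('units per layer:', best_hps.get('units_'))
print('learning rate  :', best_hps.get('learning_rate'))
# Summary of all trials
tuner.results_summary()
```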
# Keras Model 2
Here I build the model again, this time using the hyperparameters found by the tuning step.
```
#Keras Model 2
model = Sequential()
model.add(Dense(224, input_dim=6, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='sigmoid'))
# compile the keras model
model.compile(loss='binary_crossentropy',
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
metrics=['accuracy'])
# fit the keras model on the dataset
model.fit(x_train,y_train, epochs=5, validation_data=(x_test,y_test), batch_size=10)
```
Here we train the model again after tuning, to check the difference between the two models.
# Classification Report & Confusion Matrix
Here I also run predictions and compute the classification report and confusion matrix for the data used above.
```
Predict = model.predict(x_test)
Predict = (Predict > 0.5).astype(int)
# Confusion Matrix
Conf = confusion_matrix(
y_test.argmax(axis=1), Predict.argmax(axis=1))
sns.heatmap(Conf/np.sum(Conf), annot=True,
fmt='.2%', cmap='Oranges')
print(classification_report(y_test, Predict, labels=[0,1]))
accuracy_score(y_test, Predict)
```
After all of the modeling above, we simply display the classification report (precision, recall, F1 score and accuracy score) and the confusion matrix for the data above.
From the confusion matrix, the model has a false positive share of 4.64% and a false negative share of 12.88%.
Its accuracy score is 82.43%.
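The percentages quoted above can be read straight off the confusion matrix; a small sketch, assuming the 2×2 `Conf` matrix from the cell above and expressing each count as a share of all test samples:
```
# Unpack the 2x2 confusion matrix: rows are true classes, columns are predicted classes
tn, fp, fn, tp = Conf.ravel()
total = Conf.sum()
print(f"False positives: {fp / total:.2%}")
print(f"False negatives: {fn / total:.2%}")
print(f"Accuracy       : {(tp + tn) / total:.2%}")
```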
```
import pandas as pd
import numpy as np
f = '../data/text_cat.xlsx'
D = pd.read_excel(f)
D.head()
import spacy
nlp = spacy.load("it_core_news_sm")
def proc(text, model, pos_list=None):
try:
t = text.lower().replace("'", " ")
        doc = model(t)  # use the model passed as an argument rather than the global nlp
if pos_list is not None:
tokens = [x.lemma_ for x in doc if x.pos_ in pos_list]
else:
tokens = [x.lemma_ for x in doc]
except:
tokens = []
return tokens
from collections import defaultdict
I = defaultdict(lambda: defaultdict(lambda: 0))
for i, row in D.iloc[:1000].iterrows():
tokens = proc(row.body, model=nlp, pos_list=['NOUN', 'VERB'])
for token in tokens:
I[token][i] += 1
T = pd.DataFrame(I).fillna(0)
T.head()
docs, dictionary = list(T.index), list(T.columns)
T = (T.T / T.max(axis=1)).T
T.head()
idf = {}
for word in T.columns:
idf[word] = np.log(T.shape[0] / len(np.where(T[word] > 0)[0]))
tfidf = defaultdict(lambda: defaultdict(lambda: 0))
for i, row in T.iterrows():
for column, weight in row.items():
if weight > 0:
tfidf[i][column] = weight * idf[column]
else:
pass
TD = pd.DataFrame(tfidf).fillna(0).T
TD.head()
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
V = pca.fit_transform(TD)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.scatter(V[:,0], V[:,1], alpha=0.2)
plt.show()
from sklearn.metrics.pairwise import cosine_similarity
```
# SEARCH
```
q = ['renault', 'ricambio', 'veicolo']
qi = [idf[x] for x in q]
# Build the query vector in the same term space as the document-term matrix
qiv = np.zeros(len(TD.columns))
for w in q:
    iw = list(TD.columns).index(w)
    qiv[iw] = idf[w]
# Cosine similarity between the query vector and every document (must come after qiv is built)
sim = cosine_similarity(qiv.reshape(1, -1), TD)
D.loc[696].body
R = dict((i, score) for i, score in enumerate(sim[0]))
for doc, score in sorted(R.items(), key=lambda x: -x[1]):
print(doc, score)
TD.index[695]
```
# CLASSIFICATION
```
categories = [D.loc[x].category_id for x in TD.index]
V = pca.fit_transform(TD)
fig, ax = plt.subplots()
ax.scatter(V[:,0], V[:,1], alpha=0.2, c=categories)
plt.show()
from sklearn.naive_bayes import GaussianNB
k = GaussianNB()
prediction = k.fit(TD, categories).predict(TD)
import itertools
from mpl_toolkits.axes_grid1 import make_axes_locatable
def cm_plot(ax, classes, CM, title, figure):
im = ax.imshow(CM, interpolation='nearest', cmap=plt.cm.Blues)
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
figure.colorbar(im, cax=cax, orientation='vertical')
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation=90, fontsize=12)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, rotation=0, fontsize=12)
ax.set_title(title, fontsize=16)
thresh = CM.max() / 2.
for i, j in itertools.product(range(CM.shape[0]), range(CM.shape[1])):
ax.text(j, i, CM[i, j], horizontalalignment="center",
color="white" if CM[i, j] > thresh else "black", fontsize=12)
ax.set_ylabel('True label', fontsize=16)
ax.set_xlabel('Predicted label', fontsize=16)
import sklearn.metrics as mx
cm = mx.confusion_matrix(categories, prediction)
fig, ax = plt.subplots(figsize=(14,14))
cm_plot(ax, list(set(categories)), cm, 'Gaussian', fig)
plt.show()
D[D.category_id==23].head(2)
q = nlp('vendo gatti di razza con renault e ricambi')
q = [x.lemma_ for x in q]
qiv = np.zeros(len(TD.columns))
for w in q:
try:
iw = list(TD.columns).index(w)
qiv[iw] = idf[w]
except:
pass
qiv
k.predict(qiv.reshape(1, -1))
k.predict_proba(qiv.reshape(1, -1))
```
# PolyFill: Example of D3 in Jupyter
This example shows the combined use of Python and D3 for a randomized 2D space-filling algorithm and its visualization. It uses [D3's force-directed graph methods](http://bl.ocks.org/mbostock/4062045). For a description of the example's motivation and of the space-filling algorithm, see this [blog post](http://bl.ocks.org/mbostock/4062045).
### Libraries
```
from IPython.core.display import HTML
from string import Template
import pandas as pd
import random, math
HTML('<script src="lib/d3/d3.min.js"></script>')
```
### Methods
#### Geometry methods
```
def dotproduct(v1, v2):
    # Dot product of two 2D vectors
    return sum((a*b) for a, b in zip(v1, v2))
def vectorLength(v):
    return math.sqrt(dotproduct(v, v))
def angleBtwnVectors(v1, v2):
    # Signed angle (in degrees) from v1 to v2
    dot = dotproduct(v1, v2)
    det = v1[0]*v2[1] - v1[1]*v2[0]
    r = math.atan2(det, dot)
    return r * 180.0 / math.pi
def angleViolation(vectorList, maxAllowed):
    # True if the angular gap between consecutive edge vectors (sorted by
    # their angle from the x-axis) exceeds maxAllowed degrees
    violation = False
    angleList = []
    for i in range(0, len(vectorList)):
        angleList.append(angleBtwnVectors([1, 0], vectorList[i]))
    angleList.sort()
    angleList.append(angleList[0] + 360.0)
    for i in range(1, len(angleList)):
        if abs(angleList[i] - angleList[i-1]) > maxAllowed:
            violation = True
    return violation
```
#### Methods to add vertices and edges to table
```
def addVertex(vertices):
    # Append a new vertex (initialised at the origin) and return its row index
    r = len(vertices['x'])
    vertices.loc[r, 'x'] = 0
    vertices.loc[r, 'y'] = 0
    return r
def locateVertex(p, r, vertices):
    # Set the (x, y) position of vertex r
    vertices.loc[r, 'x'] = p[0]
    vertices.loc[r, 'y'] = p[1]
def addEdge(r1, r2, vertices):
    # Record the adjacency in the first free slot (a1..a5) of each endpoint
    for c in ['a1', 'a2', 'a3', 'a4', 'a5']:
        if not vertices.loc[r1, c] > -1:
            vertices.loc[r1, c] = r2
            break
    for c in ['a1', 'a2', 'a3', 'a4', 'a5']:
        if not vertices.loc[r2, c] > -1:
            vertices.loc[r2, c] = r1
            break
```
### Main Script
#### Set initial vertices
```
config = {
'random_seed' : 17,
'xExt': [-0.1,1.1] ,
'yExt': [-0.1,1.1] ,
'densityX' : 40 ,
'densityY' : 20 ,
'prob_0' : 0.1 ,
'prob_3' : 0.075 ,
'num_mod_steps' : 10 ,
'mod_step_ratio' : 0.1
}
random.seed(config['random_seed'])
vertices = pd.DataFrame({'x':[],'y':[],
'a1':[],'a2':[],'a3':[],'a4':[],'a5':[],'a6':[] })
y = 0
densityX = config['densityX']
densityY = config['densityY']
nextLine = range(densityX)
for i in range(len(nextLine)):
r = addVertex(vertices)
for line in range(densityY):
currentLine = nextLine
nextLine = []
numPointsInLine = len(currentLine)
previousNone = False
for i in range(numPointsInLine):
p = [i/float(numPointsInLine-1),y]
locateVertex(p,currentLine[i],vertices)
if i > 0:
addEdge(currentLine[i-1],currentLine[i],vertices)
if line < densityY-1:
# push either 0, 1 or 3 new vertices
rnd = random.uniform(0,1)
valid = (not previousNone) and line > 0 and i > 0 and i < (numPointsInLine - 1)
if rnd < config['prob_0'] and valid:
# 0 vertices
previousNone = True
elif rnd < (config['prob_3'] + config['prob_0']) and line < densityY-2:
# 3 vertices
nv = []
for j in range(3):
if j == 0 and previousNone:
nv.append(len(vertices['x']) - 1)
else:
nv.append(addVertex(vertices))
nextLine.append(nv[j])
addEdge(currentLine[i],nv[0],vertices)
addEdge(currentLine[i],nv[2],vertices)
previousNone = False
else:
# 1 vertex
if previousNone:
nv = len(vertices['x']) - 1
else:
nv = addVertex(vertices)
nextLine.append(nv)
addEdge(currentLine[i],nv,vertices)
previousNone = False
y += 1.0 / float(densityY-1)
vertices.head(10)
```
#### Force-directed graph
```
graph_config = pd.DataFrame(vertices).copy()
adjacencies = []
for i in range(len(graph_config['x'])):
    ve = []
    for j in range(1, 7):
        # Collect the indices of adjacent vertices stored in columns a1..a6
        if graph_config.loc[i, 'a'+str(j)] > -1:
            ve.append(int(graph_config.loc[i, 'a'+str(j)]))
    adjacencies.append(ve)
graph_config['adjacencies'] = adjacencies
graph_config['vertex'] = graph_config.index
graph_config = graph_config.drop(['a1','a2','a3','a4','a5','a6'],axis=1)
graph_config.head()
graph_template = Template('''
<style>
.vertex {
fill: #777;
}
.edge {
stroke: #111;
stroke-opacity: 1;
stroke-width: 0.5;
}
.link {
stroke: #000;
stroke-width: 0.5px;
}
.node {
cursor: move;
fill: #ccc;
stroke: #000;
stroke-width: 0.25px;
}
.node.fixed {
fill: #f00;
}
</style>
<button id="restart" type="button">re-start animation</button>
<div>
<svg width="100%" height="352px" id="graph"></svg>
</div>
<script>
var width = 750;
var height = 350;
var svg = d3.select("#graph").append("g")
.attr("transform", "translate(" + 1 + "," + 1 + ")");
var draw_graph = function() {
svg.selectAll(".link").remove();
svg.selectAll(".node").remove();
var force = d3.layout.force()
.size([width, height])
.linkStrength(0.9)
.friction(0.9)
.linkDistance(1)
.charge(-1)
.gravity(0.007)
.theta(0.8)
.alpha(0.1)
.on("tick", tick);
var drag = force.drag()
.on("dragstart", dragstart);
var link = svg.selectAll(".link"),
node = svg.selectAll(".node");
var vertices = $vertices ;
graph = {'nodes': [], 'links': []}
vertices.forEach(function(v) {
var f = false;
if ( (v.x <= 0) || (v.x >= 1) || (v.y <= 0) || (v.y >= 0.999999999999999) ) {
f = true;
}
graph.nodes.push({'x': v.x * width, 'y': v.y * height, 'fixed': f })
var e = v.adjacencies;
for (var i=0; i<e.length; i++){
graph.links.push({'source': v.vertex, 'target': e[i] })
};
});
force
.nodes(graph.nodes)
.links(graph.links)
.start();
link = link.data(graph.links)
.enter().append("line")
.attr("class", "link");
node = node.data(graph.nodes)
.enter().append("circle")
.attr("class", "node")
.attr("r", 1.5)
.on("dblclick", dblclick)
.call(drag);
function tick() {
link.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node.attr("cx", function(d) { return d.x; })
.attr("cy", function(d) { return d.y; });
}
function dblclick(d) {
d3.select(this).classed("fixed", d.fixed = false);
}
function dragstart(d) {
d3.select(this).classed("fixed", d.fixed = true);
}
}
$( "#restart" ).on('click touchstart', function() {
draw_graph();
});
draw_graph();
</script>
''')
HTML(graph_template.safe_substitute({'vertices': graph_config.to_dict(orient='records')}))
```
(Note that you can click and drag vertices to move them; the graph will update accordingly.)
# Python: APIs scraping
**Goal**: Collect data from an API in order to exploit it!
## Introduction to APIs and GET query
### What's an API?
In computer science, **API** stands for Application Programming Interface. An API is a software interface that allows applications to communicate with each other and to exchange services or data.
### Request on APIs
The **requests** module is the go-to Python library for making HTTP requests to APIs.
```
# Example
import requests
```
#### The GET request
The **GET** method of the **requests** module is the one used to **get information** from an API.
Let's make a **request** to **get** the **last position** of the **ISS station** from the **OpenNotify API**: **http://api.open-notify.org/iss-now.json**.
```
response = requests.get("http://api.open-notify.org/iss-now.json")
```
## Status codes
```
response
```
Requests return status codes that tell us whether the query succeeded or failed, and each kind of failure has its own code. Here are some useful codes with their meanings:
* **200**: Everything is normal and the server returned the requested result
* **301**: The server redirects you to a different endpoint
* **400**: Bad request (e.g. missing or malformed parameters)
* **401**: The server requires authentication and you are not authenticated
* **403**: The server indicates that you are not allowed to access the API
* **404**: The server did not find the requested resource
```
# Code 200
status_code = response.status_code
status_code
# Code 400
response = requests.get("http://api.open-notify.org/iss-pass.json")
response
# Code 404
response = requests.get("http://api.open-notify.org/iss-before.json")
response
```
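In practice we rarely compare status codes by hand. The short sketch below is not part of the original examples; it simply shows the `ok` attribute and the `raise_for_status()` method that `requests` provides for this purpose.
```
# Not part of the original examples: checking the status code programmatically.
response = requests.get("http://api.open-notify.org/iss-now.json")
if response.ok:                    # True for every status code below 400
    print("Success:", response.status_code)
else:
    response.raise_for_status()    # raises requests.exceptions.HTTPError for 4xx/5xx codes
```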
## Query parameters
Some requests need parameters to work.
```
# Example: Latitude and longitude of Paris city
parameters = {"lat" : 48.87, "lon" : 2.33} # http://api.open-notify.org/iss-pass.json?lat=48.87&lon=2.33
response = requests.get("http://api.open-notify.org/iss-pass.json", params=parameters)
```
To retrieve the raw content of the response, we use the **content** attribute.
```
response_content = response.content
response_content
```
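As a quick sanity check (not in the original notebook), the response's `url` attribute shows how `requests` encoded the parameters dictionary into the query string:
```
# The params dict is URL-encoded into the query string automatically
print(response.url)   # e.g. http://api.open-notify.org/iss-pass.json?lat=48.87&lon=2.33
```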
### Training
Apply the GET request to the city of San Francisco.
```
sf_parameters = {"lat" : 37.78, "lon" : -122.41}
sf_response = requests.get("http://api.open-notify.org/iss-pass.json", params=sf_parameters)
sf_content = sf_response.content
sf_content
```
## JSON format
**JSON** is the main format for sending and receiving data when working with an API. Python's **json** library provides two key functions, **dumps** and **loads**: **dumps** takes a Python object and returns a JSON string, while **loads** takes a JSON string and returns a Python object (list, dictionary, etc.).
```
# Example
data_science = ["Mathematics", "Statistics", "Computer Science"]
data_science
type(data_science)
import json
# dumps
data_science_string = json.dumps(data_science)
data_science_string
type(data_science_string)
# loads
data_science_list = json.loads(data_science_string)
data_science_list
type(data_science_list)
```
### Training
```
# Training with dictionaries
animals = {
"dog" : 15,
"cat" : 5,
"mouse" : 25,
"chiken" : 10
}
type(animals)
animals_string = json.dumps(animals)
animals_string
type(animals_string)
animals_dict = json.loads(animals_string)
animals_dict
type(animals_dict)
```
## Get a json from a request
The **json()** method converts the body of the response into a Python object (here, a dictionary).
```
# Example
parameters = {"lat" : 48.87, "lon" : 2.33}
response = requests.get("http://api.open-notify.org/iss-pass.json", params=parameters)
json_response = response.json()
json_response
type(json_response)
first_iss_pass_duration = json_response['response'][0]['duration']
first_iss_pass_duration
```
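As an extra illustration (not in the original notebook), `json_response['response']` is a list with one entry per predicted pass, so we can loop over all of them instead of indexing only the first:
```
# Print the duration of every predicted ISS pass over Paris
for i, iss_pass in enumerate(json_response['response']):
    print(i, iss_pass['duration'])
```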
## Type of content
When we make a **GET** request, the server provides us with a **status code**, the **data**, and also **metadata** containing information about how the response was generated. This information can be found in the response **headers**, which are accessed through the **headers** attribute.
```
# Example
response.headers
```
The header field that interests us most here is **Content-Type**.
```
response_content_type = response.headers['Content-Type']
response_content_type
```
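A common pattern, shown here as an illustration rather than taken from the original notebook, is to check `Content-Type` before deciding how to parse the body:
```
# Parse the body as JSON only when the server declares a JSON content type
if "application/json" in response.headers["Content-Type"]:
    data = response.json()
else:
    data = response.text   # fall back to the raw text body
```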
## Training
Let's find the number of people currently in space with the API **http://api.open-notify.org/astros.json**.
```
response = requests.get("http://api.open-notify.org/astros.json")
response
response.headers
response.content
response_json_data = response.json()
response_json_data
nb_people_in_space = response_json_data['number']
nb_people_in_space
```
# Lockman-SWIRE
# Lockman-SWIRE Photometric Redshifts - V1 (20170802)
master_catalogue_Lockman-SWIRE_20170710_photoz_20170802_r_and_irac1_optimised.fits
## Key information
#### Masterlist used:
dmu1/dmu1_ml_Lockman-SWIRE/data/master_catalogue_Lockman-SWIRE_20170710.fits
#### Spectroscopic redshift sample used:
dmu23/dmu23_Lockman-SWIRE/data/Lockman-SWIRE-specz-v2.1.fits
#### Templates used:
#### Filters used:
| Telescope / Instrument | Filter | Available | Used |
|------------------------|----------------|-----------|------|
| CFHT/MegaPrime/MegaCam* | cfht_megacam_u | Yes | Yes |
| CFHT/MegaPrime/MegaCam* | cfht_megacam_g | Yes | Yes |
| CFHT/MegaPrime/MegaCam* | cfht_megacam_r | Yes | Yes |
| CFHT/MegaPrime/MegaCam* | cfht_megacam_z | Yes | Yes |
| CFHT/MegaPrime/MegaCam** | cfht_megacam_g | Yes | Yes |
| CFHT/MegaPrime/MegaCam** | cfht_megacam_r | Yes | Yes |
| CFHT/MegaPrime/MegaCam** | cfht_megacam_i | Yes | Yes |
| CFHT/MegaPrime/MegaCam** | cfht_megacam_i_0 | Yes | Yes |
| CFHT/MegaPrime/MegaCam** | cfht_megacam_z | Yes | Yes |
| INT/WFC | wfc_u | Yes | Yes |
| INT/WFC | wfc_g | Yes | Yes |
| INT/WFC | wfc_r | Yes | Yes |
| INT/WFC | wfc_i | Yes | Yes |
| INT/WFC | wfc_z | Yes | Yes |
| Pan-STARRS1/Pan-STARRS1| gpc1_g | Yes | Yes |
| Pan-STARRS1/Pan-STARRS1| gpc1_r | Yes | Yes |
| Pan-STARRS1/Pan-STARRS1| gpc1_i | Yes | Yes |
| Pan-STARRS1/Pan-STARRS1| gpc1_z | Yes | Yes |
| Pan-STARRS1/Pan-STARRS1| gpc1_y | Yes | Yes |
| UKIRT/WFCAM | ukidss_j | Yes | Yes |
| UKIRT/WFCAM | ukidss_k | Yes | Yes |
| Spitzer/IRAC | irac_1 | Yes | Yes |
| Spitzer/IRAC | irac_2 | Yes | Yes |
| Spitzer/IRAC | irac_3 | Yes | Yes |
| Spitzer/IRAC | irac_4 | Yes | Yes |
- \* SpARCS, \*\* RCSLenS. Where sources are detected in both the SpARCS and RCSLenS catalogues in g, r or z, the value from the deeper SpARCS catalogue is chosen.
- Unlike in EN1, INT/WFC and Pan-STARRS1 were included in the fitting despite the small systematic offsets relative to the deeper CFHT/MegaPrime optical datasets. Once the optical prior was folded in, photo-z performance remained excellent with all bands included.
#### Additional selections applied:
- In order to have a fully calibrated redshift estimate, sources must have a magnitude detection in any of the optical r-bands (CFHT, INT or PS1) or in Spitzer/IRAC Ch1. For sources detected in both r and IRAC Ch1, the redshift and P(z) are taken from the r-band calibrated version.
- Sources must also be detected in 5 bands at different wavelengths for a redshift to be estimated (i.e. multiple r-band detections count only as one); multiple detections in similar optical bands do not count, for the reasons described below.
#### Field-specific issues encountered:
Issues with EAZY not reaching convergence were also encountered for Lockman-SWIRE, and more frequently than for EN1. The larger sample of problematic objects made it easier to identify the type of source for which this problem occurs: objects with detections at only 2-3 unique optical wavelengths, but with multiple detections in one or more of those bands.
Moving to the stricter criterion of requiring five detections at different wavelengths solved this problem, at the expense of about 300k masterlist sources (out of ~1.6 million) no longer being fit. However, given that these sources are typically very optically faint and lack near/mid-IR detections, they would not have been included in the subsequent physical modelling steps anyway.
## Plots for diagnostics and quality checking
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import properscoring as ps
from astropy.table import Table
from scipy.stats import ks_2samp
import h5py as h
import matplotlib as mpl
import corner
from astropy.convolution import Gaussian1DKernel, convolve
def pz2d_stack(pz, zspec, zgrid, smooth=3):
""" Build a 2D stack of the photometric redshift P(z) predictions
Inputs
------
pz : 2D array, (N x M)
Photo-z PDF array of N sources with M redshift steps
zspec : array, len(N)
Corresponding spectroscopic redshifts for the N sources
zgrid : array, len(M)
        Redshift grid on which the redshift PDFs are sampled
smooth : int
Number of redshift bins along which to smooth the stacked PDF
"""
kernel = Gaussian1DKernel(smooth)
photoz_stack = np.zeros((len(zgrid), len(zgrid)))
for iz, z in enumerate(zspec):
znearest = np.argmin(np.abs(z - zgrid))
# Axis 0 = zspec, axis 1 = photoz
pdf = pz[iz, :]
pdf[np.isnan(pdf)] = 0.
pdf[np.isinf(pdf)] = 0.
#print('{0} {1}'.format(z, zgrid[znearest]))
photoz_stack[znearest,:] += pdf
photoz_stack /= np.trapz(photoz_stack, zgrid, axis=1)[:, None]
photoz_stack[np.isnan(photoz_stack)] = 0.
photoz_stack_smoothed = np.zeros((len(zgrid), len(zgrid)))
    for i in range(len(zgrid)):  # use the zgrid argument, not the global photoz_zgrid
photoz_stack_smoothed[:,i] = convolve(photoz_stack[:,i], kernel)
return photoz_stack_smoothed.T
def calcStats(photoz, specz):
pzgood = (photoz >= 0.)
szgood = (specz >= 0.)
cut = np.logical_and(pzgood, szgood)
pc_pzbad = (len(photoz) - float(pzgood.sum())) / len(photoz)*100.
photoz = photoz[cut]
specz = specz[cut]
dz = photoz - specz
sigma_all = np.sqrt( np.sum((dz/(1+specz))**2) / float(len(dz)))
nmad = 1.48 * np.median( np.abs((dz - np.median(dz)) / (1+specz)))
#nmad = 1.48 * np.median( np.abs(dz) / (1+specz))
bias = np.median(dz/(1+specz))
ol1 = (np.abs(dz)/(1+specz) > 0.2 )
OLF1 = np.sum( ol1 ) / float(len(dz))
sigma_ol1 = np.sqrt( np.sum((dz[np.invert(ol1)]/(1+specz[np.invert(ol1)]))**2) / float(len(dz[np.invert(ol1)])))
ol2 = (np.abs(dz)/(1+specz) > 5*nmad )
OLF2 = np.sum( ol2 ) / float(len(dz))
sigma_ol2 = np.sqrt( np.sum((dz[np.invert(ol2)]/(1+specz[np.invert(ol2)]))**2) / float(len(dz[np.invert(ol2)])))
KSscore = ks_2samp(specz, photoz)[0]
#print('Sigma_all: {0:.3f}'.format(sigma_all))
#print('Sigma_NMAD: {0:.3f}'.format(nmad))
#print('Bias: {0:.3f}'.format(bias))
#print('OLF: Def1 = {0:.3f} Def2 = {1:0.3f}'.format(OLF1, OLF2))
#print('Sigma_OL: Def 1 = {0:.3f} Def2 = {1:0.3f}'.format(sigma_ol1, sigma_ol2))
#print('KS: {0:.3f}'.format(KSscore))
return [sigma_all, nmad, bias, OLF1, sigma_ol1, OLF2, sigma_ol2, KSscore, pc_pzbad]
def calc_HPDciv(pz, zgrid, specz):
    # HPD credible interval: fraction of each P(z) lying above the PDF value at z_spec
i_zspec = np.argmin((np.abs(specz[:,None] - zgrid[None,:])), axis=1)
pz_s = pz[np.arange(len(i_zspec)), i_zspec]
mask = (pz < pz_s[:, None])
ipz_masked = np.copy(pz)
ipz_masked[mask] *= 0.
CI = np.trapz(ipz_masked, zgrid, axis=1) / np.trapz(pz, zgrid, axis=1)
return CI
def calc_ci_dist(pz, zgrid, specz):
ci_pdf = calc_HPDciv(pz, zgrid, specz)
nbins = 100
    hist, bin_edges = np.histogram(ci_pdf, bins=nbins, range=(0,1), density=True)
cumhist = np.cumsum(hist)/nbins
bin_max = 0.5*(bin_edges[:-1]+bin_edges[1:])
return cumhist, bin_max
photometry = Table.read('master_catalogue_lockman-SWIRE_20170710_processed.fits')
photoz = Table.read('full/photoz_all_merged_r_any.fits')
best = np.abs(np.array([photoz['chi_r_eazy'], photoz['chi_r_atlas'],photoz['chi_r_cosmos']])).min(0)
good = np.logical_and(best != 99., photoz['chi_r_stellar'] > 0.)
z_spec = photometry['z_spec']
AGN = photometry['AGN']
zs_gal = np.logical_and(z_spec >= 0, AGN == 0)
zs_agn = np.logical_and(z_spec >= 0, AGN == 1)
photoz_hdf = h.File('full/pz_all_hb.hdf', mode='r')
#photoz_hdf = h.File('../XMM-LSS/full/pz_all_hb.hdf', mode='r')
photoz_pdf = photoz_hdf['Pz']
photoz_zgrid = photoz_hdf['zgrid'][:]
#photometry = Table.read('../XMM-LSS/xmm-lss_processed.fits')
#photoz = Table.read('../XMM-LSS/full/photoz_all_merged.fits')
```
### $\chi^{2}$ Properties
#### Distribution of normalised $\chi^{2}$
Due to the number of filters used per source varying between template sets and between sources themselves, in the following plots we use a normalised $\chi^{2}$ defined as $\chi^{2}_{\rm{r}} = \chi^{2} / (N_{\rm{filt}}-1)$. However we note this is not the formal reduced $\chi^{2}$ and therefore should not necessarily be used to judge the goodness-of-fit for individual sources.
```
Fig, Ax = plt.subplots(1)
Ax.hist(np.log10(photoz['chi_r_eazy'][good]), bins=100, range=(-3,2.5), histtype='step', label='EAZY')
Ax.hist(np.log10(photoz['chi_r_atlas'][good]), bins=100, range=(-3,2.5), histtype='step', label='Atlas')
Ax.hist(np.log10(photoz['chi_r_cosmos'][good]), bins=100, range=(-3,2.5), histtype='step', label='XMM-COSMOS')
#Ax.hist(np.log10(photoz['chi_r_stellar']), bins=100, range=(-3,2.5), histtype='step')
Leg = Ax.legend(loc='upper right', prop={'size':10}, frameon=False)
Ax.set_xlabel(r'$\log_{10}\chi^{2}_{\rm{r}}$')
Ax.set_ylabel('N')
Ax.set_xlim([-2, 2.5])
Fig.savefig('plots/dmu24_Lockman-SWIRE_chi2_distributions.png', format='png', bbox_inches='tight')
```
#### Galaxy/AGN vs stellar templates - $\chi^{2}$
We now plot the distribution of 'Best' normalised $\chi^{2}$ from the Galaxy/AGN template libraries vs the normalised $\chi^{2}$ from fits to the Pickles library of stellar templates. Sources are split by apparent optical magnitude with the relative number of sources indicated in each panel.
```
Fig, Ax = plt.subplots(3, 2, figsize=(6.5, 10))
mag_lims = [16, 18, 20, 22, 24, 26, 28]
for im in range(len(mag_lims)-1):
ax = Ax.flatten()[im]
ax.axes.set_aspect('equal')
mag = np.logical_and(photometry['r_any_mag'] >= mag_lims[im], photometry['r_any_mag'] < mag_lims[im+1])
fraction = (np.sum(mag*good) / np.sum(good).astype('float'))
corner.hist2d(np.log10(best[good*mag]), np.log10(photoz['chi_r_stellar'])[good*mag], ax=ax, bins=50,
plot_datapoints=True, plot_density=True)
ax.plot([-3,3],[-3,3], zorder=10000, color='orange', lw=2, ls='--')
ax.set_xlim([-2, 3])
ax.set_ylim([-2, 3])
ax.set_title('{0:d} < {1} < {2:d}'.format(mag_lims[im], '$r$',mag_lims[im+1]), size=10)
ax.set_xlabel(r'$\log_{10}\chi^{2}_{\rm{r, Star}}$')
ax.set_ylabel(r'$\log_{10}\chi^{2}_{\rm{r, Gal}}$')
ax.text(2.8, -1.8, '{0:.1f}% of sources'.format(fraction*100),
horizontalalignment='right', verticalalignment='bottom')
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_chi2_galaxy_star_comparison.png', format='png', bbox_inches='tight')
```
### Photo-z vs Spec-z Visual Comparison
As plots of photo-z vs spec-z are commonly presented in order to allow the visual inspection of the relative precision and biases of photo-z estimates, we provide a version here for reference. However, instead of choosing a single value to represent the photometric redshift (e.g. median or peak of the $P(z)$) we stack the full $P(z)$ of all the sources within each spectroscopic redshift bin.
```
pz2d_gal = pz2d_stack(photoz_pdf[zs_gal,:], z_spec[zs_gal], photoz_zgrid, 5)
pz2d_agn = pz2d_stack(photoz_pdf[zs_agn,:], z_spec[zs_agn], photoz_zgrid, 10)
Fig, Ax = plt.subplots(1, 2, figsize=(8,4))
X, Y = np.meshgrid(photoz_zgrid, photoz_zgrid)
Ax[0].pcolormesh(X, Y, pz2d_gal, cmap='magma_r',
vmin=0, vmax=np.percentile(pz2d_gal, 99.9))
# extent=[photoz_zgrid.min(), photoz_zgrid.max(),
# photoz_zgrid.min(), photoz_zgrid.max()],
Ax[1].pcolormesh(X, Y, pz2d_agn, cmap='magma_r',
vmin=0, vmax=np.percentile(pz2d_agn, 99.9))
Ax[0].set_title('Galaxies')
Ax[1].set_title('AGN')
Ax[0].set_xlim([0, np.ceil(z_spec[zs_gal].max())])
Ax[0].set_ylim([0, np.ceil(z_spec[zs_gal].max())])
Ax[1].set_xlim([0, np.ceil(z_spec[zs_agn].max())])
Ax[1].set_ylim([0, np.ceil(z_spec[zs_agn].max())])
for ax in Ax:
ax.set_xlabel('$z_{spec}$', size=12)
ax.set_ylabel('$z_{phot}$', size=12)
ax.plot(photoz_zgrid, photoz_zgrid, color='0.5', lw=2)
ax.plot(photoz_zgrid, (1+0.15)*photoz_zgrid, color='0.5', ls='--')
ax.plot(photoz_zgrid, (1-0.15)*photoz_zgrid, color='0.5', ls='--')
Fig.savefig('plots/dmu24_Lockman-SWIRE_specz_vs_photz_pz.png', format='png', bbox_inches='tight')
```
### Photo-z Statistics
Metrics for photometric redshift precision and accuracy are defined as in Duncan et al. (2017, and references therein); a minimal code sketch of the point-estimate metrics follows the table.
| Metric | Description | Definition |
|----------------|-------------|----------------------|
|$\sigma_{f}$ | Scatter - all galaxies | $\text{rms}(\Delta z / (1+z_{\text{spec}}))$ |
|$\sigma_{\text{NMAD}}$ | Normalised median absolute deviation | $1.48 \times \text{median} ( \left | \Delta z \right | / (1+z_{\text{spec}}))$ |
|Bias | | $\text{median} (\Delta z )$|
|O$_{f}$ | Outlier fraction | Outliers defined as $\left | \Delta z \right | / (1+z_{\text{spec}}) > 0.2$ |
|$\sigma_{\text{O}_{f}}$ | Scatter excluding O$_{f}$ outliers | $ \text{rms}[\Delta z / (1+z_{\text{spec}})]$ |
|$\overline{\rm{CRPS}}$ | Mean continuous ranked probability score | $\overline{\rm{CRPS}} = \frac{1}{N} \sum_{i=1}^{N} \int_{-\infty}^{+\infty} [ \rm{CDF}_{i}(z) - \rm{CDF}_{z_{s},i}(z)]^{2} dz$ |
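As a point of reference, the point-estimate metrics in the table reduce to a few lines of NumPy. The sketch below is not part of the original analysis and uses toy arrays (named `*_toy` so they do not clash with the notebook variables); it simply mirrors the quantities computed by `calcStats` above.
```
import numpy as np

# Toy arrays, purely for illustration
zp_toy = np.array([0.51, 1.02, 2.10, 0.80])   # photometric redshifts
zs_toy = np.array([0.50, 1.00, 1.50, 0.82])   # spectroscopic redshifts

dz_toy = zp_toy - zs_toy
sigma_f = np.sqrt(np.mean((dz_toy / (1 + zs_toy))**2))                        # scatter, all galaxies
nmad = 1.48 * np.median(np.abs((dz_toy - np.median(dz_toy)) / (1 + zs_toy)))  # sigma_NMAD
olf = np.mean(np.abs(dz_toy) / (1 + zs_toy) > 0.2)                            # outlier fraction O_f
print(sigma_f, nmad, olf)
```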
#### Quality statistics as a function of redshift
```
gal_z_binedges = np.linspace(0, np.ceil(np.percentile(z_spec[zs_gal],99)), 9)
agn_z_binedges = np.linspace(0, np.ceil(np.percentile(z_spec[zs_agn],99)), 9)
gal_z_bins = 0.5*(gal_z_binedges[:-1] + gal_z_binedges[1:])
agn_z_bins = 0.5*(agn_z_binedges[:-1] + agn_z_binedges[1:])
galaxy_statistics_vs_z = []
galaxy_crps_vs_z = []
for i, zmin in enumerate(gal_z_binedges[:-1]):
zcut = np.logical_and(z_spec >= zmin, z_spec < gal_z_binedges[i+1])
galaxy_statistics_vs_z.append(calcStats(photoz['z1_median'][zs_gal*zcut], z_spec[zs_gal*zcut]))
pzs = photoz_pdf[zs_gal*zcut,:]
zs = z_spec[zs_gal*zcut]
galaxy_crps_vs_z.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
galaxy_statistics_vs_z = np.array(galaxy_statistics_vs_z)
agn_statistics_vs_z = []
agn_crps_vs_z = []
for i, zmin in enumerate(agn_z_binedges[:-1]):
zcut = np.logical_and(z_spec >= zmin, z_spec < agn_z_binedges[i+1])
agn_statistics_vs_z.append(calcStats(photoz['z1_median'][zs_agn*zcut], z_spec[zs_agn*zcut]))
pzs = photoz_pdf[zs_agn*zcut,:]
zs = z_spec[zs_agn*zcut]
agn_crps_vs_z.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
agn_statistics_vs_z = np.array(agn_statistics_vs_z)
Fig, Ax = plt.subplots(3,1, sharex=True, figsize=(5,8))
Ax[0].plot(gal_z_bins, galaxy_statistics_vs_z[:,1], color='steelblue', lw=2, label='Galaxy')
Ax[0].plot(agn_z_bins, agn_statistics_vs_z[:,1], color='firebrick', lw=2, label='AGN')
Ax[0].set_ylabel(r'$\sigma_{\rm{NMAD}}$', size=12)
Ax[1].plot(gal_z_bins, galaxy_statistics_vs_z[:,3], color='steelblue', lw=2, label='Galaxy')
Ax[1].plot(agn_z_bins, agn_statistics_vs_z[:,3], color='firebrick', lw=2, label='AGN')
Ax[1].set_ylabel(r'$\rm{OLF}$')
Ax[2].plot(gal_z_bins, galaxy_crps_vs_z, color='steelblue', lw=2, label='Galaxy')
Ax[2].plot(agn_z_bins, agn_crps_vs_z, color='firebrick', lw=2, label='AGN')
Ax[2].set_ylabel(r'$\overline{\rm{CRPS}}$')
Leg = Ax[2].legend(loc='upper left', frameon=False)
Ax[2].set_xlim([0, z_spec[zs_agn].max()])
Ax[2].set_xlabel('$z_{spec}$', size=12)
Tw = Ax[0].twiny()
Tw.set_xlim([0, z_spec[zs_agn].max()])
Tw.set_xlabel('$z_{spec}$', size=12)
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_stats_vs_z.png', format='png', bbox_inches='tight')
```
#### Quality as a function of optical magnitude
```
zs_gal_mag = np.logical_and(z_spec >= 0, AGN == 0) * (photometry['r_any_mag'] > 0.)
zs_agn_mag = np.logical_and(z_spec >= 0, AGN == 1) * (photometry['r_any_mag'] > 0.)
gal_mag_binedges = np.linspace(*np.percentile(photometry['r_any_mag'][zs_gal_mag],[1,99]), num=9)
agn_mag_binedges = np.linspace(*np.percentile(photometry['r_any_mag'][zs_agn_mag],[1,99]), num=6)
gal_mag_bins = 0.5*(gal_mag_binedges[:-1] + gal_mag_binedges[1:])
agn_mag_bins = 0.5*(agn_mag_binedges[:-1] + agn_mag_binedges[1:])
galaxy_statistics_vs_mag = []
galaxy_crps_vs_mag = []
mag = photometry['r_any_mag']
for i, mmin in enumerate(gal_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < gal_mag_binedges[i+1])
galaxy_statistics_vs_mag.append(calcStats(photoz['z1_median'][zs_gal_mag*mcut], z_spec[zs_gal_mag*mcut]))
pzs = photoz_pdf[zs_gal_mag*mcut,:]
zs = z_spec[zs_gal_mag*mcut]
galaxy_crps_vs_mag.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
galaxy_statistics_vs_mag = np.array(galaxy_statistics_vs_mag)
agn_statistics_vs_mag = []
agn_crps_vs_mag = []
for i, mmin in enumerate(agn_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < agn_mag_binedges[i+1])
agn_statistics_vs_mag.append(calcStats(photoz['z1_median'][zs_agn_mag*mcut], z_spec[zs_agn_mag*mcut]))
pzs = photoz_pdf[zs_agn_mag*mcut,:]
zs = z_spec[zs_agn_mag*mcut]
agn_crps_vs_mag.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
agn_statistics_vs_mag = np.array(agn_statistics_vs_mag)
Fig, Ax = plt.subplots(3,1, sharex=True, figsize=(5,8))
Ax[0].plot(gal_mag_bins, galaxy_statistics_vs_mag[:,1], color='steelblue', lw=2, label='Galaxy')
Ax[0].plot(agn_mag_bins, agn_statistics_vs_mag[:,1], color='firebrick', lw=2, label='AGN')
Ax[0].set_ylabel(r'$\sigma_{\rm{NMAD}}$', size=12)
Ax[1].plot(gal_mag_bins, galaxy_statistics_vs_mag[:,3], color='steelblue', lw=2, label='Galaxy')
Ax[1].plot(agn_mag_bins, agn_statistics_vs_mag[:,3], color='firebrick', lw=2, label='AGN')
Ax[1].set_ylabel(r'$\rm{OLF}$')
Ax[2].plot(gal_mag_bins, galaxy_crps_vs_mag, color='steelblue', lw=2, label='Galaxy')
Ax[2].plot(agn_mag_bins, agn_crps_vs_mag, color='firebrick', lw=2, label='AGN')
Ax[2].set_ylabel(r'$\overline{\rm{CRPS}}$')
Leg = Ax[2].legend(loc='upper left', frameon=False)
#Ax[2].set_xlim([0, z_spec[zs_agn].max()])
Ax[2].set_xlabel('$r$', size=12)
Tw = Ax[0].twiny()
Tw.set_xlim(Ax[2].get_xlim())
Tw.set_xlabel('$r$', size=12)
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_stats_vs_mag.png', format='png', bbox_inches='tight')
```
### Accuracy of the photometric redshift probability distribution
Following Duncan et al. (2017), we calibrate the accuracy of the redshift PDFs by scaling the PDFs that enter the hierarchical Bayesian combination as a function of the optical magnitude of each source in the band chosen for calibration. The diagnostic plot which best illustrates the overall redshift PDF accuracy is the Q-Q plot ($\hat{F}(c)$):
> To quantify the over- or under-confidence of our photometric redshift estimates, we follow the example of Wittman et al. (2016) and calculate the distribution of threshold credible intervals, $c$, where the spectroscopic redshift is just included.
> For a set of redshift PDFs which perfectly represent the redshift uncertainty (e.g. 10% of galaxies have the true redshift within the 10% credible interval, 20% within their 20% credible interval, etc.), the expected distribution of $c$ values should be constant between 0 and 1.
> The cumulative distribution, $\hat{F}(c)$, should therefore follow a straight 1:1 relation.
> Curves which fall below this expected 1:1 relation therefore indicate that there is overconfidence in the photometric redshift errors; the $P(z)$s are too sharp. - _Duncan et al. (2017)_
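To make the diagnostic concrete, the short sketch below (not part of the original notebook) builds perfectly calibrated Gaussian P(z)'s for a set of toy redshifts and passes them through the `calc_ci_dist` helper defined above; for such PDFs the recovered $\hat{F}(c)$ should track the 1:1 line.
```
# Toy check of the credible-interval diagnostic, using the helpers defined above
rng = np.random.RandomState(0)
zgrid_toy = np.linspace(0., 7., 1401)
z_true = rng.uniform(0.2, 3.0, size=2000)
sigma_toy = 0.05 * (1 + z_true)
z_obs = z_true + rng.normal(0., sigma_toy)                   # scatter consistent with the quoted width
pz_toy = np.exp(-0.5 * ((zgrid_toy[None, :] - z_obs[:, None]) / sigma_toy[:, None])**2)
pz_toy /= np.trapz(pz_toy, zgrid_toy, axis=1)[:, None]       # normalise each P(z)
ci_toy, c_bins = calc_ci_dist(pz_toy, zgrid_toy, z_true)
print(np.abs(ci_toy - c_bins).max())                         # small value => close to the 1:1 relation
```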
```
colors = plt.cm.viridis(np.linspace(0, 1, len(gal_mag_bins)))
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=gal_mag_bins.min(), vmax=gal_mag_bins.max())
Fig, Ax = plt.subplots(1)
for i, mmin in enumerate(gal_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < gal_mag_binedges[i+1])
ci, bins = calc_ci_dist(photoz_pdf[zs_gal_mag*mcut,:], photoz_zgrid, z_spec[zs_gal_mag*mcut])
Ax.plot(bins, ci, color=colors[i], lw=2)
cbax = Fig.add_axes([0.26, 1.02, 0.5, 0.05])
CB = mpl.colorbar.ColorbarBase(cbax, cmap=cmap, norm=norm, orientation='horizontal')
CB.set_label('r')
Ax.set_aspect('equal')
Ax.set_ylim([0,1])
Ax.plot([0,1],[0,1], color='0.5', ls='dashed', lw=2)
Ax.set_xlabel(r'$c$', size=12)
Ax.set_ylabel(r'$\hat{F}(c)$', size=12)
Ax.text(0.1, 0.9, 'Galaxies', size=12, verticalalignment='top')
Fig.savefig('plots/dmu24_Lockman-SWIRE_pz_accuracy_gal.png', format='png', bbox_inches='tight')
colors = plt.cm.viridis(np.linspace(0, 1, len(agn_mag_bins)))
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=agn_mag_bins.min(), vmax=agn_mag_bins.max())
Fig, Ax = plt.subplots(1)
for i, mmin in enumerate(agn_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < agn_mag_binedges[i+1])
ci, bins = calc_ci_dist(photoz_pdf[zs_agn_mag*mcut,:], photoz_zgrid, z_spec[zs_agn_mag*mcut])
Ax.plot(bins, ci, color=colors[i], lw=2)
#cbax = mpl.colorbar.make_axes(Ax.axes)
cbax = Fig.add_axes([0.26, 1.02, 0.5, 0.05])
CB = mpl.colorbar.ColorbarBase(cbax, cmap=cmap, norm=norm, orientation='horizontal')
CB.set_label('r')
Ax.set_aspect('equal')
Ax.set_ylim([0,1])
Ax.plot([0,1],[0,1], color='0.5', ls='dashed', lw=2)
Ax.set_xlabel(r'$c$', size=12)
Ax.set_ylabel(r'$\hat{F}(c)$', size=12)
Ax.text(0.1, 0.9, 'AGN', size=12, verticalalignment='top')
Fig.savefig('plots/dmu24_Lockman-SWIRE_pz_accuracy_agn.png', format='png', bbox_inches='tight')
```
|
github_jupyter
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import properscoring as ps
from astropy.table import Table
from scipy.stats import ks_2samp
import h5py as h
import matplotlib as mpl
import corner
from astropy.convolution import Gaussian1DKernel, convolve
def pz2d_stack(pz, zspec, zgrid, smooth=3):
""" Build a 2D stack of the photometric redshift P(z) predictions
Inputs
------
pz : 2D array, (N x M)
Photo-z PDF array of N sources with M redshift steps
zspec : array, len(N)
Corresponding spectroscopic redshifts for the N sources
zgrid : array, len(M)
Redshift grid on which the redshift PDFs are samples
smooth : int
Number of redshift bins along which to smooth the stacked PDF
"""
kernel = Gaussian1DKernel(smooth)
photoz_stack = np.zeros((len(zgrid), len(zgrid)))
for iz, z in enumerate(zspec):
znearest = np.argmin(np.abs(z - zgrid))
# Axis 0 = zspec, axis 1 = photoz
pdf = pz[iz, :]
pdf[np.isnan(pdf)] = 0.
pdf[np.isinf(pdf)] = 0.
#print('{0} {1}'.format(z, zgrid[znearest]))
photoz_stack[znearest,:] += pdf
photoz_stack /= np.trapz(photoz_stack, zgrid, axis=1)[:, None]
photoz_stack[np.isnan(photoz_stack)] = 0.
photoz_stack_smoothed = np.zeros((len(zgrid), len(zgrid)))
for i in range(len(photoz_zgrid)):
photoz_stack_smoothed[:,i] = convolve(photoz_stack[:,i], kernel)
return photoz_stack_smoothed.T
def calcStats(photoz, specz):
pzgood = (photoz >= 0.)
szgood = (specz >= 0.)
cut = np.logical_and(pzgood, szgood)
pc_pzbad = (len(photoz) - float(pzgood.sum())) / len(photoz)*100.
photoz = photoz[cut]
specz = specz[cut]
dz = photoz - specz
sigma_all = np.sqrt( np.sum((dz/(1+specz))**2) / float(len(dz)))
nmad = 1.48 * np.median( np.abs((dz - np.median(dz)) / (1+specz)))
#nmad = 1.48 * np.median( np.abs(dz) / (1+specz))
bias = np.median(dz/(1+specz))
ol1 = (np.abs(dz)/(1+specz) > 0.2 )
OLF1 = np.sum( ol1 ) / float(len(dz))
sigma_ol1 = np.sqrt( np.sum((dz[np.invert(ol1)]/(1+specz[np.invert(ol1)]))**2) / float(len(dz[np.invert(ol1)])))
ol2 = (np.abs(dz)/(1+specz) > 5*nmad )
OLF2 = np.sum( ol2 ) / float(len(dz))
sigma_ol2 = np.sqrt( np.sum((dz[np.invert(ol2)]/(1+specz[np.invert(ol2)]))**2) / float(len(dz[np.invert(ol2)])))
KSscore = ks_2samp(specz, photoz)[0]
#print('Sigma_all: {0:.3f}'.format(sigma_all))
#print('Sigma_NMAD: {0:.3f}'.format(nmad))
#print('Bias: {0:.3f}'.format(bias))
#print('OLF: Def1 = {0:.3f} Def2 = {1:0.3f}'.format(OLF1, OLF2))
#print('Sigma_OL: Def 1 = {0:.3f} Def2 = {1:0.3f}'.format(sigma_ol1, sigma_ol2))
#print('KS: {0:.3f}'.format(KSscore))
return [sigma_all, nmad, bias, OLF1, sigma_ol1, OLF2, sigma_ol2, KSscore, pc_pzbad]
def calc_HPDciv(pz, zgrid, specz, dz = 0.005):
dz = np.diff(zgrid[:2])
i_zspec = np.argmin((np.abs(specz[:,None] - zgrid[None,:])), axis=1)
pz_s = pz[np.arange(len(i_zspec)), i_zspec]
mask = (pz < pz_s[:, None])
ipz_masked = np.copy(pz)
ipz_masked[mask] *= 0.
CI = np.trapz(ipz_masked, zgrid, axis=1) / np.trapz(pz, zgrid, axis=1)
return CI
def calc_ci_dist(pz, zgrid, specz):
ci_pdf = calc_HPDciv(pz, zgrid, specz)
nbins = 100
hist, bin_edges = np.histogram(ci_pdf, bins=nbins, range=(0,1), normed=True)
cumhist = np.cumsum(hist)/nbins
bin_max = 0.5*(bin_edges[:-1]+bin_edges[1:])
return cumhist, bin_max
photometry = Table.read('master_catalogue_lockman-SWIRE_20170710_processed.fits')
photoz = Table.read('full/photoz_all_merged_r_any.fits')
best = np.abs(np.array([photoz['chi_r_eazy'], photoz['chi_r_atlas'],photoz['chi_r_cosmos']])).min(0)
good = np.logical_and(best != 99., photoz['chi_r_stellar'] > 0.)
z_spec = photometry['z_spec']
AGN = photometry['AGN']
zs_gal = np.logical_and(z_spec >= 0, AGN == 0)
zs_agn = np.logical_and(z_spec >= 0, AGN == 1)
photoz_hdf = h.File('full/pz_all_hb.hdf', mode='r')
#photoz_hdf = h.File('../XMM-LSS/full/pz_all_hb.hdf', mode='r')
photoz_pdf = photoz_hdf['Pz']
photoz_zgrid = photoz_hdf['zgrid'][:]
#photometry = Table.read('../XMM-LSS/xmm-lss_processed.fits')
#photoz = Table.read('../XMM-LSS/full/photoz_all_merged.fits')
Fig, Ax = plt.subplots(1)
Ax.hist(np.log10(photoz['chi_r_eazy'][good]), bins=100, range=(-3,2.5), histtype='step', label='EAZY')
Ax.hist(np.log10(photoz['chi_r_atlas'][good]), bins=100, range=(-3,2.5), histtype='step', label='Atlas')
Ax.hist(np.log10(photoz['chi_r_cosmos'][good]), bins=100, range=(-3,2.5), histtype='step', label='XMM-COSMOS')
#Ax.hist(np.log10(photoz['chi_r_stellar']), bins=100, range=(-3,2.5), histtype='step')
Leg = Ax.legend(loc='upper right', prop={'size':10}, frameon=False)
Ax.set_xlabel(r'$\log_{10}\chi^{2}_{\rm{r}}$')
Ax.set_ylabel('N')
Ax.set_xlim([-2, 2.5])
Fig.savefig('plots/dmu24_Lockman-SWIRE_chi2_distributions.png', format='png', bbox_inches='tight')
Fig, Ax = plt.subplots(3, 2, figsize=(6.5, 10))
mag_lims = [16, 18, 20, 22, 24, 26, 28]
for im in range(len(mag_lims)-1):
ax = Ax.flatten()[im]
ax.axes.set_aspect('equal')
mag = np.logical_and(photometry['r_any_mag'] >= mag_lims[im], photometry['r_any_mag'] < mag_lims[im+1])
fraction = (np.sum(mag*good) / np.sum(good).astype('float'))
corner.hist2d(np.log10(best[good*mag]), np.log10(photoz['chi_r_stellar'])[good*mag], ax=ax, bins=50,
plot_datapoints=True, plot_density=True)
ax.plot([-3,3],[-3,3], zorder=10000, color='orange', lw=2, ls='--')
ax.set_xlim([-2, 3])
ax.set_ylim([-2, 3])
ax.set_title('{0:d} < {1} < {2:d}'.format(mag_lims[im], '$r$',mag_lims[im+1]), size=10)
ax.set_xlabel(r'$\log_{10}\chi^{2}_{\rm{r, Star}}$')
ax.set_ylabel(r'$\log_{10}\chi^{2}_{\rm{r, Gal}}$')
ax.text(2.8, -1.8, '{0:.1f}% of sources'.format(fraction*100),
horizontalalignment='right', verticalalignment='bottom')
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_chi2_galaxy_star_comparison.png', format='png', bbox_inches='tight')
pz2d_gal = pz2d_stack(photoz_pdf[zs_gal,:], z_spec[zs_gal], photoz_zgrid, 5)
pz2d_agn = pz2d_stack(photoz_pdf[zs_agn,:], z_spec[zs_agn], photoz_zgrid, 10)
Fig, Ax = plt.subplots(1, 2, figsize=(8,4))
X, Y = np.meshgrid(photoz_zgrid, photoz_zgrid)
Ax[0].pcolormesh(X, Y, pz2d_gal, cmap='magma_r',
vmin=0, vmax=np.percentile(pz2d_gal, 99.9))
# extent=[photoz_zgrid.min(), photoz_zgrid.max(),
# photoz_zgrid.min(), photoz_zgrid.max()],
Ax[1].pcolormesh(X, Y, pz2d_agn, cmap='magma_r',
vmin=0, vmax=np.percentile(pz2d_agn, 99.9))
Ax[0].set_title('Galaxies')
Ax[1].set_title('AGN')
Ax[0].set_xlim([0, np.ceil(z_spec[zs_gal].max())])
Ax[0].set_ylim([0, np.ceil(z_spec[zs_gal].max())])
Ax[1].set_xlim([0, np.ceil(z_spec[zs_agn].max())])
Ax[1].set_ylim([0, np.ceil(z_spec[zs_agn].max())])
for ax in Ax:
ax.set_xlabel('$z_{spec}$', size=12)
ax.set_ylabel('$z_{phot}$', size=12)
ax.plot(photoz_zgrid, photoz_zgrid, color='0.5', lw=2)
ax.plot(photoz_zgrid, (1+0.15)*photoz_zgrid, color='0.5', ls='--')
ax.plot(photoz_zgrid, (1-0.15)*photoz_zgrid, color='0.5', ls='--')
Fig.savefig('plots/dmu24_Lockman-SWIRE_specz_vs_photz_pz.png', format='png', bbox_inches='tight')
gal_z_binedges = np.linspace(0, np.ceil(np.percentile(z_spec[zs_gal],99)), 9)
agn_z_binedges = np.linspace(0, np.ceil(np.percentile(z_spec[zs_agn],99)), 9)
gal_z_bins = 0.5*(gal_z_binedges[:-1] + gal_z_binedges[1:])
agn_z_bins = 0.5*(agn_z_binedges[:-1] + agn_z_binedges[1:])
galaxy_statistics_vs_z = []
galaxy_crps_vs_z = []
for i, zmin in enumerate(gal_z_binedges[:-1]):
zcut = np.logical_and(z_spec >= zmin, z_spec < gal_z_binedges[i+1])
galaxy_statistics_vs_z.append(calcStats(photoz['z1_median'][zs_gal*zcut], z_spec[zs_gal*zcut]))
pzs = photoz_pdf[zs_gal*zcut,:]
zs = z_spec[zs_gal*zcut]
galaxy_crps_vs_z.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
galaxy_statistics_vs_z = np.array(galaxy_statistics_vs_z)
agn_statistics_vs_z = []
agn_crps_vs_z = []
for i, zmin in enumerate(agn_z_binedges[:-1]):
zcut = np.logical_and(z_spec >= zmin, z_spec < agn_z_binedges[i+1])
agn_statistics_vs_z.append(calcStats(photoz['z1_median'][zs_agn*zcut], z_spec[zs_agn*zcut]))
pzs = photoz_pdf[zs_agn*zcut,:]
zs = z_spec[zs_agn*zcut]
agn_crps_vs_z.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
agn_statistics_vs_z = np.array(agn_statistics_vs_z)
Fig, Ax = plt.subplots(3,1, sharex=True, figsize=(5,8))
Ax[0].plot(gal_z_bins, galaxy_statistics_vs_z[:,1], color='steelblue', lw=2, label='Galaxy')
Ax[0].plot(agn_z_bins, agn_statistics_vs_z[:,1], color='firebrick', lw=2, label='AGN')
Ax[0].set_ylabel(r'$\sigma_{\rm{NMAD}}$', size=12)
Ax[1].plot(gal_z_bins, galaxy_statistics_vs_z[:,3], color='steelblue', lw=2, label='Galaxy')
Ax[1].plot(agn_z_bins, agn_statistics_vs_z[:,3], color='firebrick', lw=2, label='AGN')
Ax[1].set_ylabel(r'$\rm{OLF}$')
Ax[2].plot(gal_z_bins, galaxy_crps_vs_z, color='steelblue', lw=2, label='Galaxy')
Ax[2].plot(agn_z_bins, agn_crps_vs_z, color='firebrick', lw=2, label='AGN')
Ax[2].set_ylabel(r'$\overline{\rm{CRPS}}$')
Leg = Ax[2].legend(loc='upper left', frameon=False)
Ax[2].set_xlim([0, z_spec[zs_agn].max()])
Ax[2].set_xlabel('$z_{spec}$', size=12)
Tw = Ax[0].twiny()
Tw.set_xlim([0, z_spec[zs_agn].max()])
Tw.set_xlabel('$z_{spec}$', size=12)
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_stats_vs_z.png', format='png', bbox_inches='tight')
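# repeat the photo-z quality statistics, now as a function of r-band magnitude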
zs_gal_mag = np.logical_and(z_spec >= 0, AGN == 0) * (photometry['r_any_mag'] > 0.)
zs_agn_mag = np.logical_and(z_spec >= 0, AGN == 1) * (photometry['r_any_mag'] > 0.)
gal_mag_binedges = np.linspace(*np.percentile(photometry['r_any_mag'][zs_gal_mag],[1,99]), num=9)
agn_mag_binedges = np.linspace(*np.percentile(photometry['r_any_mag'][zs_agn_mag],[1,99]), num=6)
gal_mag_bins = 0.5*(gal_mag_binedges[:-1] + gal_mag_binedges[1:])
agn_mag_bins = 0.5*(agn_mag_binedges[:-1] + agn_mag_binedges[1:])
galaxy_statistics_vs_mag = []
galaxy_crps_vs_mag = []
mag = photometry['r_any_mag']
for i, mmin in enumerate(gal_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < gal_mag_binedges[i+1])
galaxy_statistics_vs_mag.append(calcStats(photoz['z1_median'][zs_gal_mag*mcut], z_spec[zs_gal_mag*mcut]))
pzs = photoz_pdf[zs_gal_mag*mcut,:]
zs = z_spec[zs_gal_mag*mcut]
galaxy_crps_vs_mag.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
galaxy_statistics_vs_mag = np.array(galaxy_statistics_vs_mag)
agn_statistics_vs_mag = []
agn_crps_vs_mag = []
for i, mmin in enumerate(agn_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < agn_mag_binedges[i+1])
agn_statistics_vs_mag.append(calcStats(photoz['z1_median'][zs_agn_mag*mcut], z_spec[zs_agn_mag*mcut]))
pzs = photoz_pdf[zs_agn_mag*mcut,:]
zs = z_spec[zs_agn_mag*mcut]
agn_crps_vs_mag.append(np.nanmean([ps.crps_ensemble(zs[i],
forecasts=photoz_zgrid,
weights=pzs[i]) for i in range(len(zs))]))
agn_statistics_vs_mag = np.array(agn_statistics_vs_mag)
Fig, Ax = plt.subplots(3,1, sharex=True, figsize=(5,8))
Ax[0].plot(gal_mag_bins, galaxy_statistics_vs_mag[:,1], color='steelblue', lw=2, label='Galaxy')
Ax[0].plot(agn_mag_bins, agn_statistics_vs_mag[:,1], color='firebrick', lw=2, label='AGN')
Ax[0].set_ylabel(r'$\sigma_{\rm{NMAD}}$', size=12)
Ax[1].plot(gal_mag_bins, galaxy_statistics_vs_mag[:,3], color='steelblue', lw=2, label='Galaxy')
Ax[1].plot(agn_mag_bins, agn_statistics_vs_mag[:,3], color='firebrick', lw=2, label='AGN')
Ax[1].set_ylabel(r'$\rm{OLF}$')
Ax[2].plot(gal_mag_bins, galaxy_crps_vs_mag, color='steelblue', lw=2, label='Galaxy')
Ax[2].plot(agn_mag_bins, agn_crps_vs_mag, color='firebrick', lw=2, label='AGN')
Ax[2].set_ylabel(r'$\overline{\rm{CRPS}}$')
Leg = Ax[2].legend(loc='upper left', frameon=False)
#Ax[2].set_xlim([0, z_spec[zs_agn].max()])
Ax[2].set_xlabel('$r$', size=12)
Tw = Ax[0].twiny()
Tw.set_xlim(Ax[2].get_xlim())
Tw.set_xlabel('$r$', size=12)
Fig.tight_layout()
Fig.savefig('plots/dmu24_Lockman-SWIRE_stats_vs_mag.png', format='png', bbox_inches='tight')
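# credible-interval distributions (P(z) calibration check) in r-band magnitude bins: galaxies first, then AGN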
colors = plt.cm.viridis(np.linspace(0, 1, len(gal_mag_bins)))
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=gal_mag_bins.min(), vmax=gal_mag_bins.max())
Fig, Ax = plt.subplots(1)
for i, mmin in enumerate(gal_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < gal_mag_binedges[i+1])
ci, bins = calc_ci_dist(photoz_pdf[zs_gal_mag*mcut,:], photoz_zgrid, z_spec[zs_gal_mag*mcut])
Ax.plot(bins, ci, color=colors[i], lw=2)
cbax = Fig.add_axes([0.26, 1.02, 0.5, 0.05])
CB = mpl.colorbar.ColorbarBase(cbax, cmap=cmap, norm=norm, orientation='horizontal')
CB.set_label('r')
Ax.set_aspect('equal')
Ax.set_ylim([0,1])
Ax.plot([0,1],[0,1], color='0.5', ls='dashed', lw=2)
Ax.set_xlabel(r'$c$', size=12)
Ax.set_ylabel(r'$\hat{F}(c)$', size=12)
Ax.text(0.1, 0.9, 'Galaxies', size=12, verticalalignment='top')
Fig.savefig('plots/dmu24_Lockman-SWIRE_pz_accuracy_gal.png', format='png', bbox_inches='tight')
colors = plt.cm.viridis(np.linspace(0, 1, len(agn_mag_bins)))
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(vmin=agn_mag_bins.min(), vmax=agn_mag_bins.max())
Fig, Ax = plt.subplots(1)
for i, mmin in enumerate(agn_mag_binedges[:-1]):
mcut = np.logical_and(mag >= mmin, mag < agn_mag_binedges[i+1])
ci, bins = calc_ci_dist(photoz_pdf[zs_agn_mag*mcut,:], photoz_zgrid, z_spec[zs_agn_mag*mcut])
Ax.plot(bins, ci, color=colors[i], lw=2)
#cbax = mpl.colorbar.make_axes(Ax.axes)
cbax = Fig.add_axes([0.26, 1.02, 0.5, 0.05])
CB = mpl.colorbar.ColorbarBase(cbax, cmap=cmap, norm=norm, orientation='horizontal')
CB.set_label('r')
Ax.set_aspect('equal')
Ax.set_ylim([0,1])
Ax.plot([0,1],[0,1], color='0.5', ls='dashed', lw=2)
Ax.set_xlabel(r'$c$', size=12)
Ax.set_ylabel(r'$\hat{F}(c)$', size=12)
Ax.text(0.1, 0.9, 'AGN', size=12, verticalalignment='top')
Fig.savefig('plots/dmu24_Lockman-SWIRE_pz_accuracy_agn.png', format='png', bbox_inches='tight')
# DanQ CNN-LSTM Experiments
Based on the DanQ neural network for DNA classification (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4914104/), this is a series of experiments designed to find the optimal architecture for our problem of DNA promoter classification with limited data, while remaining relatively true to the general concept of the DanQ CNN-LSTM. The parameters we examine are the pooling stride, the convolution kernel size, the type of convolution used, and the number of convolutional layers.
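The models below consume one-hot encoded sequences of shape (672, 4) loaded from x_train.npy; the preprocessing that produced those arrays is not shown in this notebook. As a reference only, here is a minimal sketch of how such an encoding could be built; the helper name `one_hot_encode` and the A/C/G/T column ordering are assumptions rather than the original pipeline.
```
import numpy as np

# Hypothetical helper (not the original preprocessing): one-hot encode a DNA string
# into a (length, 4) float array with columns ordered A, C, G, T.
def one_hot_encode(seq, length=672):
    mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    arr = np.zeros((length, 4), dtype=np.float32)
    for i, base in enumerate(seq[:length].upper()):
        if base in mapping:            # ambiguous bases (e.g. N) stay all-zero
            arr[i, mapping[base]] = 1.0
    return arr

x_example = one_hot_encode('ACGT' * 168)   # toy 672-nt sequence
print(x_example.shape)                     # (672, 4), matching start_target_size below
```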
```
# imports
from keras.layers import Conv2D, BatchNormalization, AveragePooling2D, Dense, Dropout, Activation, AveragePooling1D, Bidirectional, MaxPooling2D, GaussianNoise
from keras.layers import Input, Concatenate, Flatten, Embedding, CuDNNLSTM, Conv1D, MaxPooling1D, LSTM, StackedRNNCells, LSTMCell, Reshape, TimeDistributed, SeparableConv1D
from keras.layers import RepeatVector, Permute, merge, multiply, GlobalMaxPooling1D, Lambda, BatchNormalization, GlobalAveragePooling1D
from keras.layers.merge import Multiply
from keras.models import Model, load_model
from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger, LearningRateScheduler
from keras import backend as K
import numpy as np
import os
K.clear_session()          # reset the Keras session before (re)building models
if 'model' in dir():       # 'model' only exists after an earlier experiment in this session
    del model
# our data
start_target_size = (672, 4)
batch_size = 16
x_train = np.load('D:/Projects/iSynPro/iSynPro/DanQCNNLSTM/x_train.npy')
y_train = np.load('D:/Projects/iSynPro/iSynPro/DanQCNNLSTM/y_train.npy')
# Basic 1D Conv CNN LSTM
# ala DanQ
# this is our base case
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x = Conv1D(512, kernel_size=26, strides=1, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=13, strides=13)(x)
x = Dropout(0.2)(x)
x = Bidirectional(CuDNNLSTM(256, return_sequences=True))(x)
x = Dropout(0.5)(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/danq_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
```
# Notes about kernel size and pool size
It doesn't necessarily make sense to use such large kernel and pool sizes. Most transcription factors bind to DNA sites that are between 8 and 16 nucleotides long, and even these sites tend to have smaller pockets within them where the transcription factor actually binds with high affinity. At the risk of adding more parameters, we can test whether a finer-grained architecture helps; the sketch below quantifies that trade-off before the experiments.
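To make the trade-off concrete, here is a small back-of-the-envelope sketch (not part of the original experiments) of how the pooling configuration changes the number of time steps fed to the bidirectional LSTM and, through the Flatten layer, the size of the Dense(1024) head. It assumes the 'same'-padded convolutions, 'valid' pooling, and 256 LSTM units per direction used in the base model above.
```
# Not from the original notebook: rough arithmetic for the pooling configurations
# tried in these experiments. A 'same'-padded Conv1D keeps the length at 672;
# MaxPooling1D with 'valid' padding then yields floor((672 - pool) / stride) + 1 steps.
seq_len = 672
lstm_units = 256                       # per direction, as in the base model above

for pool, stride in [(13, 13), (8, 8), (8, 2)]:
    steps = (seq_len - pool) // stride + 1
    dense_params = (steps * 2 * lstm_units + 1) * 1024    # Flatten -> Dense(1024)
    print('pool={:2d} stride={:2d} -> LSTM steps={:3d}, Dense(1024) params={:,}'.format(
        pool, stride, steps, dense_params))
```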
```
# similar to DanQ as above
# but with different pool and kernel sizes
# and dropout schema
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x = Conv1D(256, kernel_size=16, strides=1, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=8, strides=8)(x)
x = Bidirectional(CuDNNLSTM(256, return_sequences=True))(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/smallkerneldanq_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
# similar to DanQ as above
# but with different pool and kernel sizes
# and dropout schema
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x = Conv1D(256, kernel_size=16, strides=1, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=8, strides=2)(x)
x = Bidirectional(CuDNNLSTM(256, return_sequences=True))(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/smallstridedanq_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
# similar to DanQ as above
# but with two conv filters in front
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x1 = Conv1D(128, kernel_size=16, strides=1, padding='same', activation='relu')(x)
x2 = Conv1D(128, kernel_size=16, strides=1, padding='same', activation='relu')(x1)
x = Concatenate()([x1, x2])
x = MaxPooling1D(pool_size=8, strides=8)(x)
x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
model.summary()
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/dualcnnconcat_danq_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
# dual 1Dconv-LSTM, second conv is depthwise-first
# concat prior to LSTM
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x1 = Conv1D(256, kernel_size=16, strides=1, padding='same', activation='relu')(x)
x2 = SeparableConv1D(256, kernel_size=16, strides=1, padding='same', activation='relu')(x1)
x = Concatenate()([x1, x2])
x = MaxPooling1D(pool_size=8, strides=8)(x)
x = Bidirectional(CuDNNLSTM(256, return_sequences=True))(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/dualseparable_cnnlstm_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
# similar to DanQ as above
# but with two conv filters in front
# build our model
inputs = Input(shape=start_target_size)
x = GaussianNoise(0.3)(inputs)
x1 = Conv1D(128, kernel_size=16, strides=1, padding='same', activation='relu')(x)
x2 = Conv1D(128, kernel_size=16, strides=1, padding='same', activation='relu')(x1)
x3 = Concatenate()([x1, x2])
x3 = Conv1D(128, kernel_size=16, strides=1, padding='same', activation='relu')(x3)
x = Concatenate()([x1, x2, x3])
x = MaxPooling1D(pool_size=8, strides=8)(x)
x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x)
x = Flatten()(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='binary_crossentropy',
optimizer= SGD(lr=1e-3, momentum=0.9),
metrics=['binary_accuracy'])
model.summary()
# save path, callbacks
save_path = 'D:/Projects/Github/SyntheticPromoter/DanQCNNLSTM/triplecnnconcat_danq_weights'
lr_descent = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=5,
verbose=1,
mode='auto',
epsilon=0.0001,
cooldown=1,
min_lr=1e-6)
save_model = ModelCheckpoint(os.path.join(save_path, 'weights-{epoch:02d}-{val_loss:.2f}.hdf5'),
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(os.path.join(save_path, 'training_history.csv'), separator=',', append=False)
# train model
model.fit(x_train,
y_train,
batch_size=16,
epochs=30,
shuffle=True,
verbose=2,
validation_split=0.1,
callbacks = [save_model, csv_logger])
del model
K.clear_session()
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score, roc_curve
x_test = np.load('D:/Projects/iSynPro/iSynPro/DanQCNNLSTM/x_test.npy')
y_test = np.load('D:/Projects/iSynPro/iSynPro/DanQCNNLSTM/y_test.npy')
model_list = ['D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/danq_weights/weights-28-0.52.hdf5',
'D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/smallkerneldanq_weights/weights-29-0.52.hdf5',
'D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/smallstridedanq_weights/weights-30-0.51.hdf5',
'D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/dualcnnconcat_danq_weights/weights-28-0.51.hdf5',
              'D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/triplecnnconcat_danq_weights/weights-28-0.52.hdf5',
              'D:/Projects/iSynpro/SyntheticPromoter/DanQCNNLSTM/dualseparable_cnnlstm_weights/weights-28-0.52.hdf5'
]
label_list = ['DanQ', 'Small Kernel', 'Small Stride', 'Dual Conv Stem', 'Triple Conv Stem', 'Dual Separable Conv Stem']
roc_list = []
for path in model_list:
model = load_model(path)
y_pred = model.predict(x_test)
auc = roc_auc_score(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_list.append([fpr, tpr, auc])
K.clear_session()
del model
palette = sns.color_palette("cubehelix", len(roc_list))
#plot roc curve
for i in range(len(roc_list)):
plt.plot(roc_list[i][0],
roc_list[i][1],
color=palette[i],
label='{0} (AUC = {1:.3f})'.format(label_list[i], roc_list[i][2]))
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Vanilla CNN-LSTMs')
plt.legend(loc="lower right")
plt.savefig('d:/projects/isynpro/SyntheticPromoter/readme_figures/cnnlstm_roc.png', bbox_inches = 'tight')
plt.show()
```
```
from __future__ import absolute_import, division, print_function
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
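# build a simple fully connected classifier: flatten the 28x28 images, one ReLU hidden layer, softmax over the 10 classes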
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(units=128, activation=tf.nn.relu),
keras.layers.Dense(units=10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.xticks(range(10), class_names, rotation=45)
plt.show()
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
```
<a href="https://colab.research.google.com/github/DeathLilly/2021_22-Ratza-Intro-CS-Sem-2/blob/main/Sprint-1-Python-Basics/01_python_basics_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Key Terms
Complete the following questions to solidify your understanding of the key concepts. Feel free to use the lecture notebook or any other resources to help you, but do not copy and paste answers from the internet; instead, use your own words to present your understanding.
----
What is a general use programming language?
A programming language that can be used for many different tasks rather than being designed specifically for one.
Who invented Python and when?
Guido van Rossum, Feb 1991
What is the difference between front-end and back-end?
The back end is the hidden part that makes everything run in the background; the front end is the part that makes it usable and pretty.
What is a GUI?
Graphical user interface
What is an API?
Application programming interface
What is Open Source Software?
Software that is usable and available for everyone to see, save, and edit
What is a development environment?
The environment where you edit and view your code
What is meant by local and remote in the context of computers?
Local is on your own device; remote means being on a server or accessing another machine
What is an operating system?
An operating system is the base software used to run a machine; well-known operating systems include Linux, Windows, iOS, and more
What is a kernel?
A piece of code that resides at the core of an operating system and has complete control over it
What is a shell?
What shell do we have access to in Colab notebooks?
A shell is the part of an operating system that is exposed to us so we can interact with it by typing commands; in Colab notebooks we have access to a Bash shell (via `!` commands)
What is an interpreter?
A program that runs instructions directly, without them first having to be compiled on your machine
What is a value in programming?
A value is a basic piece of data that a program stores and works with, such as a number or a piece of text
What is an expression in programming?
A combination of values, variables, and operators that the interpreter evaluates to produce a value
What is syntax?
The set of rules and terminology that defines how code in a language must be written
What do we call the process of discovering and resolving errors?
Debugging
### Code
Let's revisit some of the things we practiced in the lecture. In the code cell below print your name to the console without first declaring it as a variable.
```
Name1="Ren"
```
Now declare your first name and last name as separate variables and combine them in the print statement.
```
Name1="Ren"
Name2="Primeau"
```
In the cell below run the "Zen of Python" easter egg.
```
import this
```
### Explore
This portion of the assignment contains things we didn't explicitly cover in the lecture, instead encouraging you to explore and experiment on your own to discover some of the different operators and expressions in Python. For each expression first describe what you expect to happen before running the code cell.
Documentation for Python's numeric operators can be found [here](https://docs.python.org/3.10/library/stdtypes.html#numeric-types-int-float-complex)
#### `5 + 2 * 2`
What do you expect to happen?
2*2=4 5+4=9
```
5+2*2
```
#### `2 / 3`
What do you expect to happen?
Math-
```
2/3
```
#### `2.5 * 10`
What do you expect to happen?
More maths
```
2.5*10
```
#### `a`
What do you expect to happen?
IDk
```
a
```
#### `'a'`
What do you expect to happen?
```
'a'
```
#### `521 // 5`
What do you expect to happen?
```
521//5
```
<a href="https://colab.research.google.com/github/carvalheirafc/imd0033_2018_2/blob/master/aula22/Lesson_22.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# 1.0 Comparing Frequency Distributions
## 1.1 Comparing Frequency Distributions
In the previous mission, we learned **what graphs we can use to visualize the frequency distribution of any kind of variable**. In this mission, we'll learn about the graphs we can use to **compare** multiple frequency distributions at once.
We'll continue to work with the WNBA data set. Below are the first five rows to help you recollect its structure:
| _ | Name | Team | Pos | Height | Weight | BMI | Birth_Place | Birthdate | Exp_ordinal |
|---|-----------------|------|-----|--------|--------|-----------|-------------|-------------------|-------------------|
| 0 | Aerial Powers | DAL | F | 183 | 71.0 | 21.200991 | US | January 17, 1994 | Little experience |
| 1 | Alana Beard | LA | G/F | 185 | 73.0 | 21.329438 | US | May 14, 1982 | Veteran |
| 2 | Alex Bentley | CON | G | 170 | 69.0 | 23.875433 | US | October 27, 1990 | Experienced |
| 3 | Alex Montgomery | SAN | G/F | 185 | 84.0 | 24.543462 | US | December 11, 1988 | Very experienced |
| 4 | Alexis Jones | MIN | G | 175 | 78.0 | 25.469388 | US | August 5, 1994 | Rookie |
Notice in the table above that we've kept the **Exp_ordinal** variable we created in the previous mission. To remind you, this variable is measured on an **ordinal scale** and describes the level of experience of a player according to the following labeling convention:
| Years in WNBA | Label |
|---------------|-------------------|
| 0 | Rookie |
| 1-3 | Little experience |
| 4-5 | Experienced |
| 6-10 | Very experienced |
| >10 | Veteran |
```
import pandas as pd
# read the dataset
wnba = pd.read_csv("wnba.csv")
# cleaning the experience column
wnba.loc[wnba.Experience == 'R',"Experience"] = 0
wnba["Experience"] = wnba["Experience"].astype(int)
# create exp_ordinal column
wnba["Exp_ordinal"] = pd.cut(wnba.Experience,
bins=[-1,0,3,5,10,100],
labels=["Rookie","Little experienced","Experienced",
"Very experienced","Veteran"])
# verify the results
wnba.Exp_ordinal.value_counts()
```
Let's say we're interested in analyzing how the distribution of the **Pos** variable (**player position**) varies with the level of experience. In other words, we want to determine, for instance, what are the positions on the court that rookies play most as, and how do rookies compare to veterans with respect to positions on the field.
Here's a series of steps we can take to achieve that:
- Segment the players in the data set by level of experience.
- For each segment, generate a frequency distribution table for the **Pos** variable.
- Analyze the frequency distributions comparatively.
In the cell below, we've already done the first step for you and segmented the players in the data set by level of experience. The next two steps are left for you as an exercise.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- For each segment, generate a frequency distribution table for the **Pos** variable.
- For the **rookie** segment, assign the frequency distribution table to a variable named **rookie_distro**. tip: use df.value_counts() function.
- For the **Little experience** segment, assign the table to **little_xp_distro.**
- For the **Experienced** segment, assign the table to **experienced_distro.**
- For the **Very experienced** segment, assign the table to **very_xp_distro**.
- For the **Veteran** segment, assign the table to **veteran_distro.**
- Print all the tables and analyze them comparatively to determine whether there are any clear patterns in the distribution of player position depending on the level of experience.
```
rookies = wnba[wnba['Exp_ordinal'] == 'Rookie']
little_xp = wnba[wnba['Exp_ordinal'] == 'Little experienced']  # match the label created by pd.cut above
experienced = wnba[wnba['Exp_ordinal'] == 'Experienced']
very_xp = wnba[wnba['Exp_ordinal'] == 'Very experienced']
veterans = wnba[wnba['Exp_ordinal'] == 'Veteran']
rookie_distro = rookies['Pos'].value_counts()
rookie_distro.plot.barh(title='Rookies')
little_xp_distro = little_xp['Pos'].value_counts()
little_xp_distro.plot.barh(title='Little XP')
experienced_distro = experienced['Pos'].value_counts()
experienced_distro.plot.barh(title='Experienced')
very_xp_distro = very_xp['Pos'].value_counts()
very_xp_distro.plot.barh(title='Very XP')
veterans_distro = veterans['Pos'].value_counts()
veterans_distro.plot.barh(title='Veterans')
```
## 1.2 Grouped Bar Plots
The purpose of the previous exercise was to give us a sense of how cumbersome it really is to compare multiple distributions at once using frequency tables. Fortunately, we can make the comparison much more quickly and efficiently using graphs.
All the five frequency tables we wanted to compare were for the **Pos** variable, which is measured on a nominal scale. Remember that one kind of graph we can use to visualize the distribution of a nominal variable is a bar plot. A simple solution to our problem is to generate a bar plot for each table, and then group all the bar plots on a single figure.
This is where we'd like to arrive:
<img width="600" src="https://drive.google.com/uc?export=view&id=1xO1mMfvHCMhgglqAI0FrCBE-sE9gt9PZ">
Because we grouped all the bar plots together, the graph above is called a **grouped bar plot**. We can generate a grouped bar plot just like the one above using the [seaborn.countplot()](https://seaborn.pydata.org/generated/seaborn.countplot.html) function from the seaborn module, which you might already be familiar with from our visualization lessons. In the code snippet below, we will:
- Import the **seaborn** module with the alias **sns**.
- Generate the plot with **sns.countplot()**. We'll use the following parameters for this function:
- **x** — specifies as a string the name of the column we want on the x-axis. We'll place the **Exp_ordinal** column on the x-axis.
- **hue** — specifies as a string the name of the column we want the bar plots generated for. We want to generate the bar plots for the **Pos** column.
- **data** - specifies the name of the variable which stores the data set. We stored the data in a variable named **wnba**.
```
import seaborn as sns
sns.countplot(x = 'Exp_ordinal', hue = 'Pos', data = wnba)
```
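For reference, the same grouped view can also be produced without seaborn by cross-tabulating the two columns with pandas. This is only a sketch of an alternative approach, assuming the **wnba** DataFrame defined above:
```
import pandas as pd

# Frequency of each position within each experience level, as a table
pos_by_exp = pd.crosstab(wnba['Exp_ordinal'], wnba['Pos'])

# One group of bars per experience level, one bar per position
pos_by_exp.plot.bar(rot=0, figsize=(10, 5))
```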
Comparing the **five distributions** is now easier, and we can make a couple of observations:
- There's only one **rookie** playing on a combined position **(F/C)**. This is significantly less compared to more experienced players, which suggests that combined positions (**F/C** and **G/F**) may require more complex skills on the field that rookies rarely have.
- Rookies are the only category where we don't find players on all positions. We can see there are no rookies who play on a G/F position.
- Guards predominate for every level of experience. This probably means that most players in a basketball team are guards. It's worth examining the distributions of a couple of teams to find whether this is true. If it's true, it might be interesting to find out why teams need so many guards.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Using **sns.countplot()**, generate a grouped bar plot similar to the one above.
- Place the **Exp_ordinal** variable on the x-axis.
- Generate the bar plots for the **Pos** variable. The data set is stored in **wnba** variable.
- Using the **order** parameter of **sns.countplot()**, order the values on the x-axis in **descending** order. The **order** parameter takes in a list of strings, so you should use **order = ['Veteran', 'Very experienced', ..........]**.
- Using the **hue_order** parameter, order the bars of each bar plot in ascending alphabetic order. **hue_order** takes in a list of strings, so you can use **hue_order = ['C', 'F', ......].**
```
import seaborn as sns
sns.countplot(x='Exp_ordinal', hue='Pos', data=wnba,
order=['Veteran',
'Very experienced',
'Experienced',
'Little experienced',
'Rookie'],
              hue_order=['C', 'F', 'F/C', 'G', 'G/F'])  # ascending alphabetical; the data uses F/C, not C/F
```
## 1.3 Challenge: Do Older Players Play Less?
Once players get past a certain age, their physical fitness declines year on year. Intuitively, the fitness level of a player should directly affect how much she plays in a season. On average, a WNBA player played approximately 497 minutes in the 2016-2017 season:
```
wnba['MIN'].mean()
```
Let's hypothesize that older players generally play less than this average of 497 minutes, while younger players generally play more. As a benchmark to distinguish between younger and older players, we'll take the mean age of players in our sample, which is approximately 27:
```
wnba['Age'].mean()
```
To test our hypothesis, we can generate a grouped bar plot to examine the frequency distribution of younger and older players that played under the average or as much as the average or above. Our hypothesis predicts that we should see a grouped bar plot that looks similar to this:
<img width="600" src="https://drive.google.com/uc?export=view&id=11S_m6RQAGChN_iOy7mS1qoEjCOWSEBDV">
To generate a graph like the one above, we'll first need to create two new variables:
- An ordinal variable which labels each player as "young" or "old". If the player is 27 or over, we'll label her "old", otherwise the label is "young".
- An ordinal variable which describes whether the minutes played is below or above average (or equal to the average). If a player played 497 minutes or more, we'll assign her the label "average or above", otherwise we'll assign "below average".
In the code below, we'll use **lambda** functions to describe quickly the labeling logic above and **Series.apply()** to apply the **lambda** functions on the **Age** and **MIN** columns. We'll name the two resulting columns **age_mean_relative** and **min_mean_relative**.
```
wnba['age_mean_relative'] = wnba['Age'].apply(lambda x: 'old' if x >= 27 else 'young')
wnba['min_mean_relative'] = wnba['MIN'].apply(lambda x: 'average or above' if x >= 497 else
'below average')
cols = ["Name","Age","age_mean_relative","MIN","min_mean_relative"]
wnba[cols].head()
```
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Generate a grouped bar plot to confirm or reject our hypothesis. Using **sns.countplot()**:
- Place the **age_mean_relative** variable on the x-axis. The **age_mean_relative** and **min_mean_relative** are already defined.
- Generate the frequency distributions for the **min_mean_relative variable.**
- Analyze the graph and determine whether the data confirms or rejects our hypothesis. If it's a confirmation assign the string **'confirmation'** to a variable named **result**. If it's a rejection, assign the string **'rejection'** to the variable **result.**
```
import seaborn as sns
sns.countplot(x='age_mean_relative', hue='min_mean_relative', data=wnba)
result = 'rejection'
```
## 1.4 Comparing Histograms
Contrary to what our hypothesis predicted, the grouped bar plot we built showed that among old players the "average or above" category is the most numerous. Among young players we saw an opposite pattern: there are more players who played below the average number of minutes.
A shortcoming of our analysis so far is that the **min_mean_relative** variable doesn't show much granularity. We can see that more **old players** belong to the **"average or above"** category than to **"below average"**, but we can't tell, for instance, whether **old players** generally play much more than the average. For all we know, they could have all played exactly 497 minutes (which is the average).
The **min_mean_relative** variable is ordinal, and it was derived from the **MIN** variable, which is measured on a ratio scale. The information provided by the **MIN** variable is much more granular, and we can plot the distribution of this variable instead. Because the **MIN** variable is measured on a ratio scale, we'll need to use histograms instead of bar plots.
The easiest way to **compare two histograms** is to superimpose one on top of the other. We can do that by using the pandas visualization methods we learned in the previous mission:
```
wnba[wnba.Age >= 27]['MIN'].plot.hist(label = 'Old', legend = True)
wnba[wnba.Age < 27]['MIN'].plot.hist(label = 'Young', legend = True)
```
We can now see that most of the **old players** that belong to the **"average or above"** category play significantly more than average. The main downside of the visualization above is that the histogram for **young players** covers a large part of the other histogram. We can fix this easily by plotting only the shape of the histograms, using the **histtype** parameter and choosing the **'step'** type:
```
sns.set_style("white")
wnba[wnba.Age >= 27]['MIN'].plot.hist(histtype = 'step', label = 'Old', legend = True,color="red")
wnba[wnba.Age < 27]['MIN'].plot.hist(histtype = 'step', label = 'Young', legend = True,color="blue")
```
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Looking on our graph above, it's not easy to visualize where the average number of minutes is. Using the [plt.axvline()](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.axvline.html) function, add a vertical line to demarcate the average point:
- The vertical line should be at point 497 on the x-axis.
- Use the label parameter of **plt.axvline()** to label it **'Average'**. Display the label by running **plt.legend()**.
```
import matplotlib.pyplot as plt
import seaborn as sns
fig, ax = plt.subplots()
sns.set_style("white")
wnba[wnba.Age >= 27]['MIN'].plot.hist(histtype = 'step', label = 'Old', legend = True,color="red")
wnba[wnba.Age < 27]['MIN'].plot.hist(histtype = 'step', label = 'Young', legend = True,color="blue")
ax.axvline(wnba['MIN'].mean(), label='Average')   # mean MIN is approximately 497
ax.legend()
```
## 1.5 Kernel Density Estimate Plots
The step-type histograms we built made it possible to see both distributions clearly. The graph looked a bit overcrowded though, and the legend was not ideally positioned.
<img width="500" src="https://drive.google.com/uc?export=view&id=1Oogmu0kyhTgtK-N1zqsdzYGisTM2D6bE">
If we added more histograms to the graph above, it would become highly unreadable, and it'd be difficult to see any clear patterns. One solution to this problem is to smooth out the shape of the histograms to make them look less dense on the graph. This is how a single histogram would look smoothed out:
<img width="500" src="https://drive.google.com/uc?export=view&id=1us30ptyKArBL7GvemVRzYvBlGOD3KuMR">
We can smooth out our two histograms above for old and young players using the [Series.plot.kde()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.plot.kde.html) function:
```
wnba[wnba.Age >= 27]['MIN'].plot.kde(label = 'Old', legend = True)
wnba[wnba.Age < 27]['MIN'].plot.kde(label = 'Young', legend = True)
```
Each of the smoothed histograms above is called a **kernel density estimate** plot or, for short, a **kernel density plot**. Unlike histograms, **kernel density** plots display densities on the y-axis instead of frequencies. The density values relate to probabilities: the area under the curve between two points gives the probability of a value falling in that range, which we'll be able to understand more about after the probability courses. All you need to know for now is that we can use kernel density plots to get a much clearer picture of the shape of a distribution.
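As a quick sanity check (not part of the original mission), the sketch below fits a Gaussian kernel density estimate to the **MIN** column directly with scipy and integrates it numerically, showing that the total area under a kernel density curve is approximately 1; this is what lets areas under the curve be read as probabilities.
```
import numpy as np
from scipy.stats import gaussian_kde

minutes = wnba['MIN'].dropna().values
kde = gaussian_kde(minutes)                  # Gaussian KDE of minutes played

grid = np.linspace(minutes.min() - 300, minutes.max() + 300, 2000)
area = np.trapz(kde(grid), grid)             # numerically integrate the density curve
print(round(area, 3))                        # approximately 1.0
```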
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Reproduce the kernel density plots above, and add a vertical line to demarcate the average point.
- The vertical line should be at point 497 on the x-axis.
- Label the vertical line **'Average'** and make sure the label is displayed in the legend.
- Can we still see that most of the old players that belong to the **"average or above"** category play significantly more than average? If so, is the pattern more obvious (faster to observe) than in the case of the step-type histograms?
```
import matplotlib.pyplot as plt
import seaborn as sns
fig, ax = plt.subplots()
wnba[wnba.Age >= 27]['MIN'].plot.kde(label = 'Old', legend = True)
wnba[wnba.Age < 27]['MIN'].plot.kde(label = 'Young', legend = True)
ax.axvline(wnba['MIN'].mean(), label='Average')   # mean MIN is approximately 497
ax.legend()
```
## 1.6 Drawbacks of Kernel Density Plots
As data scientists, we'll often need to compare more than two distributions. In fact, previously in this mission we compared five distributions on a grouped bar plot:
<img width="400" src="https://drive.google.com/uc?export=view&id=1nSTEDf8EAAE8fQSqxs5dwrlqoU7PF9Gx">
Grouped bar plots are ideal for variables measured on nominal and ordinal scales. For variables measured on a ratio or interval scale, we learned that kernel density plots are a good solution when we have several distributions to compare. However, kernel density plots tend to become unreadable once we reach five distributions or more.
Let's say we're interested in analyzing the distribution of player height as a function of player position. In other words, we want to figure out, for instance, whether centers are generally taller than forwards, whether forwards are generally shorter than guards, and so on. In the code below, we'll segment the data set by player position, and for each segment we'll generate a kernel density plot for the distribution of the **Height** variable:
```
wnba[wnba.Pos == 'F']['Height'].plot.kde(label = 'F', legend = True)
wnba[wnba.Pos == 'C']['Height'].plot.kde(label = 'C', legend = True)
wnba[wnba.Pos == 'G']['Height'].plot.kde(label = 'G', legend = True)
wnba[wnba.Pos == 'G/F']['Height'].plot.kde(label = 'G/F', legend = True)
wnba[wnba.Pos == 'F/C']['Height'].plot.kde(label = 'F/C', legend = True)
```
If we look very closely, we can see a couple of clear patterns: the shortest players are generally guards, the tallest players are generally centers, mid-height players are generally forwards or play in a combined position, etc.
Having to look very closely at a graph to identify obvious patterns is far from ideal. If there's any pattern, we want to see it immediately. To overcome this problem, we can use other kinds of graphs, which present the same information in a more readable way. For the rest of this mission, we'll explore two such alternatives.
## 1.7 Strip Plots
This is one alternative we can use to visualize the distribution of **heights** as a function of **player** position:
<img width="400" src="https://drive.google.com/uc?export=view&id=1aQYZGHJg1IDb0C5dUxaSE1coWb3ZI6Dp">
The **Pos** variable is represented on the x-axis, while **Height** is on the y-axis. Each of the five vertical lines made of distinctly colored bullets represents a distribution. These are the logical steps we'd take to build a plot like the one above:
- Segment the data set by player position.
- For every segment:
- List all the values in the **Height** variable.
- For every value in that list, draw a bullet point on a graph. The x-coordinate of the bullet point is given by the player position, and the y-coordinate by the player's height.
<img width="500" src="https://drive.google.com/uc?export=view&id=1Cp_Pd3uSY-9nE7mPw9oGz788ZPsrMUpJ">
Because we segment by player position, within each segment the position value is identical for every player, while the heights vary. The segmentation also guarantees that the position differs from segment to segment. After drawing all the bullet points for all the segments, we inevitably end up with five narrow vertical strips, one above each unique value on the x-axis. This is why each of the five plots is called a **strip plot**.
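Before switching to the dedicated seaborn function, here's a minimal sketch (assuming the `wnba` DataFrame is loaded) that follows the steps above by hand, using plain matplotlib:
```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# Segment by position; every point in a segment shares the same x-coordinate
for pos, segment in wnba.groupby('Pos'):
    ax.scatter([pos] * len(segment), segment['Height'])
ax.set_xlabel('Pos')
ax.set_ylabel('Height')
plt.show()
```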
To generate the first graph above with five strip plots, we can use the [sns.stripplot()](https://seaborn.pydata.org/generated/seaborn.stripplot.html?highlight=stripplot#seaborn.stripplot) function from the seaborn module. We place the **Pos** variable on the x-axis and **Height** on the y-axis:
```
sns.stripplot(x = 'Pos', y = 'Height', data = wnba)
```
Patterns are now immediately visible. We can see on the graph that the **shortest players are guards** — in fact, all players under 180 cm are guards. The **tallest players are centers** — this is the only category with players above 2 meters. Among combined positions, we can see that **F/C has slightly taller representatives** — most likely because it requires center qualities (and we've seen that the tallest players are generally centers).
A **big downside** of strip plots is that the bullet **points overlap**. We can **fix** this by adding a bit of **jitter** to each distribution, which we do by setting the jitter parameter to **True**:
```
sns.stripplot(x = 'Pos', y = 'Height', data = wnba, jitter = True)
```
On a side note, you might have noticed that strip plots are similar to the scatter plots we learned about in the visualization courses. **In fact, strip plots are actually scatter plots.** When one of the variables is nominal or ordinal, a scatter plot will generally take the form of a series of narrow strips (the number of narrow strips will be the same as the number of unique values in the nominal or ordinal variable).
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Using strip plots, examine the distribution of player **weight** (not height) as a function of player **position**. The graph should have the following properties:
- The **Pos** variable is on the x-axis and the **Weight** variable on the y-axis.
- Each **strip** plot has **jitter** added to it, using the default amount applied by **jitter = True**.
- Do you see any similarity with the distributions of the **Height** variable? If so, how could this be explained?
```
import seaborn as sns
sns.stripplot(x='Pos', y='Weight', data=wnba, jitter=True)
```
## 1.8 Box plots
Besides strip plots, there's another kind of graph we can use to display many distributions at once and make sure everything is still readable. Below, we use this kind of graph to plot again the distribution of player height as a function of player position:
<img width="400" src="https://drive.google.com/uc?export=view&id=1YM9kJ-0f8eMvYEvI1C11TpvwQlqSuyRa">
Each individual plot above shows a distribution. Let's isolate the height distribution of guards and understand it by comparing it with a histogram showing the same distribution:
<img width="800" src="https://drive.google.com/uc?export=view&id=1ubAeLqYPthw2jJpN_ApMc30qM1BjSXpN">
In a nutshell, the graph on the right shows the range of the distribution and its three quartiles (the 25th, the 50th and the 75th percentile). This allows us to get a good visual intuition about the proportion of values that fall under a certain quartile, between any two quartiles, or between a quartile and the minimum or the maximum value in the distribution:
<img width="800" src="https://drive.google.com/uc?export=view&id=1Y0H3DLjHVbZOZSzOq8htlinzCxEAQW_R">
The two lines extending upward and downward out of the box in the middle look a bit like two whiskers, which is why we call this plot a **box-and-whisker** plot or, more conveniently, just a **box plot.**
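As a quick check, this minimal sketch (assuming `wnba` is loaded) prints the numbers a box plot encodes for the guards' height distribution: the minimum, the three quartiles, and the maximum.
```python
guards = wnba[wnba['Pos'] == 'G']['Height']
print(guards.min(), guards.quantile(0.25), guards.median(),
      guards.quantile(0.75), guards.max())
```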
We can generate the five box plots above using the [sns.boxplot()](https://seaborn.pydata.org/generated/seaborn.boxplot.html) function. On the x-axis we want the **Pos** variable, and on the y-axis the **Height** variable.
```
sns.boxplot(x = 'Pos', y = 'Height', data = wnba)
```
You might wonder what the few dots on the box plots of centers and guards/forwards (G/F) mean, and **why some box plots seem to lack some of the quartiles**. We'll discuss this in the next screen. For now, let's practice generating box plots.
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Using **sns.boxplot()**, generate a series of box plots to examine the distribution of player weight as a function of player position. Place the **Pos** variable on the x-axis and the **Weight** variable on the y-axis.
```
import seaborn as sns
sns.boxplot(x='Pos', y='Weight', data=wnba)
```
## 1.9 Outliers
The few dots we see for the box plots of centers and guards/forwards (G/F) represent values in the distribution that are much larger or much lower than the rest of the values. A value that is much lower or much larger than the rest of the values in a distribution is called an **outlier.**
<img width="400" src="https://drive.google.com/uc?export=view&id=1WoZ6rZWu8bIFhJaPwQ5mDvoo2pU0dzLa">
A value is an **outlier** if:
- It's larger than the upper quartile by more than 1.5 times the difference between the upper quartile and the lower quartile (this difference is also called the interquartile range).
- It's lower than the lower quartile by more than 1.5 times the interquartile range.
<img width="600" src="https://drive.google.com/uc?export=view&id=1NminuWMq8htgOFD5TeAyiLFhXdCtHQjJ">
Probably this is not yet crystal clear, so let's walk through an example. Let's consider the box plot for centers:
<img width="800" src="https://drive.google.com/uc?export=view&id=1krahzq8cce3FQmzXVekQM0UumStTIq8l">
From the output of **wnba[wnba['Pos'] == 'C']['Height'].describe()**, we can see that the upper quartile (the 75th percentile) is 196 and the lower quartile (the 25th percentile) is 193. Hence, the interquartile range is 3.
$$
\text{interquartile range} = \text{upper quartile} - \text{lower quartile}
$$
Every value that is more than $3 \times 1.5 = 4.5$ above the upper quartile is considered an outlier. The upper quartile is 196, so any value greater than $196 + 4.5 = 200.5$ is an outlier.
Similarly, every value that is more than $4.5$ below the lower quartile is an outlier. The lower quartile is 193, so any value less than $193 - 4.5 = 188.5$ is an outlier.
<img width="500" src="https://drive.google.com/uc?export=view&id=18HtBhxsTPDtuhq4W0YoXfCs8Rx1gz-yQ">
This formal definition of an outlier is arbitrary, and we could change it if we wanted to. For any given distribution, the upper and lower quartiles and the interquartile range remain constant; however, the 1.5 factor can vary. If the factor is increased, the range outside which values are considered outliers widens; if the factor is decreased, that range narrows.
When we generate boxplots, we can increase or decrease this factor by using the **whis** parameter of the **sns.boxplot()** function. This is the same height distribution for centers without any outliers:
```
sns.boxplot(wnba[wnba['Pos'] == 'C']['Height'], whis = 4,
orient = 'vertical', width = .15,)
```
**Exercise**
<img width="100" src="https://drive.google.com/uc?export=view&id=1E8tR7B9YYUXsU_rddJAyq0FrM0MSelxZ">
- Consider the quartiles of the **Games Played** variable:
```python
>> wnba['Games Played'].describe()
count 143.000000
mean 24.356643
std 7.104259
min 2.000000
25% 22.000000
50% 27.000000
75% 29.000000
max 32.000000
Name: Games Played, dtype: float64
```
- Find the interquartile range, and assign the result to a variable named **iqr**.
- Using a factor of 1.5, calculate the lower and upper bound outside which values are considered outliers.
- Assign the value of the lower bound to a variable named **lower_bound**.
- Assign the upper bound to a variable named **upper_bound.**
- Find how many values in the distribution are outliers.
- Assign the number of outliers below the lower bound to a variable named **outliers_low.**
- Assign the number of outliers above the upper bound to a variable named **outliers_high.**
- Plot a boxplot to check whether your answers are sensible.
```
iqr = wnba['Games Played'].describe()['75%'] - wnba['Games Played'].describe()['25%']
lower_bound = wnba['Games Played'].describe()['25%'] - (iqr * 1.5)
upper_bound = wnba['Games Played'].describe()['75%'] + (iqr * 1.5)
print(lower_bound)
print(upper_bound)
outliers_low = len(wnba[wnba['Games Played'] < lower_bound])
outliers_high = len(wnba[wnba['Games Played'] > upper_bound])
print(outliers_low)
print(outliers_high)
import seaborn as sns
sns.boxplot(x='Games Played', data=wnba)
```
## 1.10 Next Steps
In this mission, we learned how to compare frequency distributions using graphs. Grouped bar plots are ideal to compare the frequency distributions of nominal or ordinal variables. For variables measured on an interval or ratio scale, we can use step-type histograms, kernel density plots, or, for better readability, strip plots or box plots.
<img width="400" src="https://drive.google.com/uc?export=view&id=1J7n1gvx8sQpJ-WNZF5do8VPQk_vf2ORb">
We've come a long way in this course from learning about sampling to visualizing multiple frequency distributions. We've made great progress so far and completed the workflow we set out to do in the first mission.
<img width="600" src="https://drive.google.com/uc?export=view&id=1XQ_nPiVB1pMBaS0ikBE6IPeifOYbDG11">
# Author & Notes
This Jupyter Notebook was written by Benjamin S. Meyers ([bsm9339@rit.edu](mailto:bsm9339@rit.edu)). This is a very simple tutorial to get you started working with HDF5. It covers datasets, but HDF5 is more than just data storage; it's also a set of functions for working with that data.
# Background on HDF5
[HDF5](https://www.hdfgroup.org/solutions/hdf5/) has been completely revamped since HDF4 (sort of like Python 2.7 vs. Python 3+). HDF5 has seven _concepts_ that you need to worry about:
- **File:** The HDF5 file containing any number of datasets. Typically organized like a file directory (see Groups).
- **Dataset:** Datasets contain data in the form of an n-dimensional array, typically with elements of the same type.
- **Datatype:** Metadata describing the individual elements of the dataset, e.g. 32bit integers, structs, strings, etc.
- **Dataspace:** Metadata describing the layout of the data, e.g. 3-D array.
- **Attribute:** Simple metadata describing basically anything you want.
- **Group:** A group is a collection of related datasets. Starting from the root group of the file, you may have a group of visualizations and a group of datasets, for example.
- **Link:** Links between related objects (Datasets and Groups) inside an HDF5 file (or even in another HDF5 file).
As you can see, HDF5 is object-oriented in its design, but it's actually implemented in C for the sake of efficiency.
Here is a very good [video tutorial](https://www.youtube.com/watch?v=BAjsCldRMMc) to get started.
# Background on H5py
We'll be using [H5py](https://docs.h5py.org/en/stable/index.html), which is a popular Python wrapper for HDF5. [PyTables](https://www.pytables.org/) is an alternative, but they have a "there is only one good way to do X" mindset, so they're a bit limited.
The key thing to remember when using H5py is that HDF5 groups work like dictionaries, and HDF5 datasets work like NumPy arrays.
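For example, once H5py is installed (see Setup below), access looks roughly like this; the file and dataset names here (`example.hdf5`, `measurements`, `temperature`) are made up purely for illustration:
```python
import h5py

with h5py.File("example.hdf5", "r") as f:        # hypothetical file
    group = f["measurements"]                    # dict-like: access a group by key
    print(list(group.keys()))                    # dict-like: list the group's members
    temperature = group["temperature"]           # a dataset inside the group
    print(temperature.shape, temperature.dtype)  # NumPy-like metadata
    print(temperature[:10])                      # NumPy-style slicing reads the data
```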
# Setup
Install HDF5:
- Debian/Ubuntu: `sudo apt install libhdf5-dev`
- Others: [See Documentation](https://www.hdfgroup.org/downloads/hdf5)
Install H5py: `pip3 install h5py`
```
# Imports
import h5py
import numpy as np
```
# (1) Creating an HDF5 File
```
with h5py.File("testfile.hdf5", "w") as f:
# This creates a file called `testfile.hdf5` containing a dataset called
# `test_dataset` with a 1-D array of size 100 with integer elements
test_dataset = f.create_dataset("test_dataset", (100, ), dtype="i")
```
NOTE: If you try to view `testfile.hdf5` in vim or some other text editor, you'll only see binary data.
# (2) Reading, Writing, and Closing an HDF5 File
```
# Note the read permissions
with h5py.File("testfile.hdf5", "r") as f:
pass # Do stuff
# OR
f = h5py.File("testfile.hdf5", "r")
```
So, how do we know what we have inside this file?
```
list(f.keys())
```
Now we know this file contains a dataset called "test_dataset". Let's create another:
```
# Variable length unicode strings
dt = h5py.special_dtype(vlen=str)
f.create_dataset("apples", (10,), dtype=dt)
```
We opened the HDF5 file with read permissions, so we can't modify it. Let's fix that:
```
# Always do this
h5py.File.close(f)
# Not this
# f.close()
# "a" = "rw" if the file exists
f = h5py.File("testfile.hdf5", "a")
list(f.keys())
```
**Read that again.** If you run `f.close()` instead of `h5py.File.close(f)`, your HDF5 will likely be corrupted.
# (3) Modifying and Accessing Datasets
Okay, now let's try creating another dataset:
```
# Variable length unicode strings
dt = h5py.special_dtype(vlen=str)
apples_dataset = f.create_dataset("apples", (10,), dtype=dt)
print(list(f.keys()))
print(apples_dataset.shape)
print(apples_dataset.dtype)
```
We've got two datasets. Let's add some apples:
```
apples = ["Red Delicious", "Gala", "Granny Smith", "Golden Delicious", "Lady", "Baldwin", "McIntosh", "Honey Crisp", "Fuji", "Cortland"]
apples_dataset = np.array(apples, dtype=dt)
print(apples_dataset)
print(list(f["apples"]))
```
So, rebinding the variable `apples_dataset` to a NumPy array **does not** change the actual dataset. Instead, we need to do this:
```
apples_dataset = f["apples"]
# The `[...]` is critical
apples_dataset[...] = np.array(apples, dtype=dt)
print(list(f["apples"]))
print(apples_dataset[0])
# This should give us an IndexError
apples_dataset[10] = "Empire"
```
If we want to add more data, we need to change the shape of the dataset:
```
apples_dataset.resize((11,))
print(apples_dataset)
```
Alright, so we didn't plan very well. We need our dataset to be _chunked_ if we want to resize it. To understand what this means, we need to look at how HDF5 stores data:
- **Contiguous Layout:** The default. Datasets are serialized into a monolithic block, which maps directly to a memory buffer the size of the dataset.
- **Chunked Layout:** Datasets are split into chunks which are stored separately in the file. Storage order doesn't matter. The benefit of chunking is that (1) datasets can be resized, and (2) chunks can be read/written individually, improving performance when manipulating a subset of the dataset. [More details](https://support.hdfgroup.org/HDF5/doc/Advanced/Chunking/).
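A quick way to tell which layout a dataset uses (a sketch, continuing with the open file `f` from above): the `chunks` property is `None` for a contiguous dataset and a tuple of chunk dimensions for a chunked one, while `maxshape` shows how far it can grow.
```python
ds = f["apples"]
print(ds.chunks)    # None -> contiguous layout; a tuple such as (10,) -> chunked
print(ds.maxshape)  # (10,) -> fixed size; (None,) -> resizable along that axis
```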
```
# Delete our "apples" dataset
del f["apples"]
# Recreate it, but make it chunked
apples_dataset = f.create_dataset("apples", (10,), maxshape=(None,), dtype=dt, chunks=True)
apples_dataset[...] = np.array(apples, dtype=dt)
print(list(f["apples"]))
```
Now we should be able to resize the dataset:
```
apples_dataset.resize((11,))
print(apples_dataset)
apples_dataset[10] = "Empire"
print(list(f["apples"]))
```
# (4) Attributes
```
print(dict(apples_dataset.attrs))
```
So, our "apples" dataset has no attributes, i.e. no metadata. Let's fix that:
```
attr_name = "Description"
attr_data = "List of the most popular species of apples."
apples_dataset.attrs.create(name=attr_name, data=attr_data)
print(dict(apples_dataset.attrs))
```
# (5) Groups
Groups allow us to group related datasets together. So let's make a group for vegetable datasets. First, let's see what the existing group hierarchy is:
```
print(f.name)
```
That's the root group. What about our apples?
```
print(f["apples"].name)
```
And now some veggies:
```
veggie_group = f.create_group("vegetables")
print(veggie_group)
```
Now we need some datasets for our veggie group:
```
root_veggies = veggie_group.create_dataset("root_veggies", (10,), maxshape=(None,), dtype=dt, chunks=True)
print(root_veggies)
leafy_veggies = veggie_group.create_dataset("leafy_veggies", (10,), maxshape=(None,), dtype=dt, chunks=True)
print(leafy_veggies)
root_veggies[...] = np.array(["Onions", "Sweet Potatoes", "Turnips", "Ginger", "Beets", "Garlic", "Radishes", "Turnips", "Fennel", "Carrots"])
print(list(root_veggies))
print(root_veggies.name)
print(leafy_veggies.name)
print(veggie_group.name)
```
As you can see, groups are basically directories of related datasets.
```
print(dict(f.items()))
```
We can iterate over the items in a group and run a function like this:
```
def printname(name):
print(name)
f["vegetables"].visit(printname)
```
```
import os
import json
import psycopg2
import pandas as pd
import geopandas as gpd
from geopandas import GeoSeries, GeoDataFrame
import folium
import fiona
from pyproj import Proj, transform
conn = psycopg2.connect(dbname="gis", user="postgres", password="")
rapperswil_polygon_query = "SELECT way FROM planet_osm_polygon WHERE osm_id = -1683921"
zurich_polygon_query = "SELECT way FROM planet_osm_polygon WHERE osm_id = -1682248"
switzerland_polygon_query = "SELECT way FROM planet_osm_polygon WHERE osm_id = -51701"
rapperswil_location = [47.226, 8.818]
zurich_location = [47.3763, 8.5403]
```
## Get the building polygons of all pubs and restaurants in Rapperswil
```
poi_polygons = gpd.read_postgis(
"""SELECT polygon.way AS geometry FROM planet_osm_polygon AS polygon
INNER JOIN planet_osm_point AS point
ON st_within(point.way, polygon.way)
WHERE point.amenity IN ('pub', 'restaurant')
AND st_within(point.way, ({}))
AND polygon.building = 'yes'""".format(rapperswil_polygon_query),
conn, geom_col='geometry')
poi_polygons.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=rapperswil_location, zoom_start=17)
folium.GeoJson(poi_polygons).add_to(m)
m
import matplotlib.cm as cmx
import matplotlib.colors as colors
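# Map a value in [minimum, maximum] to an (r, g, b) colour on a blue -> green -> red gradient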
def rgb(minimum, maximum, value):
minimum, maximum = float(minimum), float(maximum)
ratio = 2 * (value-minimum) / (maximum - minimum)
b = int(max(0, 255*(1 - ratio)))
r = int(max(0, 255*(ratio - 1)))
g = 255 - b - r
return r, g, b
def style_function(feature, n_colors):
cid = feature['properties']['cid']
return {
'fillOpacity': 0.5,
'weight': 0,
        'fillColor': 'red' if cid is None else "rgb{}".format(rgb(0, n_colors, cid))
}
def init_style_function(n_colors):
return lambda feature: style_function(feature, n_colors)
```
## Cluster the restaurants and pubs with DBSCAN
```
poi_polygons_clustered = gpd.read_postgis(
"""SELECT polygon.way AS geometry,
ST_ClusterDBSCAN(polygon.way, eps := 50, minpoints := 3) over () AS cid FROM planet_osm_polygon AS polygon
INNER JOIN planet_osm_point AS point
ON st_within(point.way, polygon.way)
WHERE point.amenity IN ('pub', 'restaurant')
AND st_within(point.way, ({}))
AND polygon.building = 'yes'""".format(rapperswil_polygon_query),
conn, geom_col='geometry')
n_clusters = len(poi_polygons_clustered.groupby('cid').cid.nunique())
poi_polygons_clustered.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=rapperswil_location, zoom_start=16, tiles="cartodbpositron")
folium.GeoJson(poi_polygons_clustered, style_function=init_style_function(n_clusters)).add_to(m)
m
```
## Generate convex hulls around each cluster
```
query_convex_hull = """
WITH clusters AS (
SELECT polygon.way AS geometry,
ST_ClusterDBSCAN(polygon.way, eps := 50, minpoints := 3) over () AS cid FROM planet_osm_polygon AS polygon
INNER JOIN planet_osm_point AS point
ON st_within(point.way, polygon.way)
WHERE point.amenity IN ('pub', 'restaurant')
AND st_within(point.way, ({}))
AND polygon.building = 'yes'
)
SELECT cid, ST_ConvexHull(ST_Union(geometry)) AS convexhull
FROM clusters
WHERE cid IS NOT NULL
GROUP BY cid;
"""
poi_polygons_clustered = gpd.read_postgis(query_convex_hull.format(rapperswil_polygon_query), conn, geom_col='convexhull')
n_clusters = len(poi_polygons_clustered.groupby('cid').cid.nunique())
poi_polygons_clustered.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=rapperswil_location, zoom_start=16, tiles="cartodbpositron")
folium.GeoJson(poi_polygons_clustered, style_function=init_style_function(n_clusters)).add_to(m)
m
```
## Convex hull for clustered restaurants and pubs in Zürich
```
poi_polygons_clustered = gpd.read_postgis(query_convex_hull.format(zurich_polygon_query), conn, geom_col='convexhull')
n_clusters = len(poi_polygons_clustered.groupby('cid').cid.nunique())
poi_polygons_clustered.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=zurich_location, zoom_start=16, tiles="cartodbpositron")
folium.GeoJson(poi_polygons_clustered, style_function=init_style_function(n_clusters)).add_to(m)
m
```
## Do the same with more tags
```
shop_tags = ['mall', 'bakery', 'beverages', 'butcher', 'chocolate', 'coffee',
'confectionery', 'deli', 'frozen_food', 'greengrocer', 'healthfood',
'ice_cream', 'pasta', 'pastry', 'seafood', 'spices', 'tea', 'department_store',
'supermarket', 'bag', 'boutique', 'clothes', 'fashion', 'jewelry', 'leather',
'shoes', 'tailor', 'watches', 'chemist', 'cosmetics', 'hairdresser',
'medical_supply', 'electrical', 'hardware', 'electronics', 'sports',
'swimming_pool', 'collector', 'games', 'music', 'books', 'gift', 'stationery',
'ticket', 'laundry', 'pet', 'tobacco', 'toys']
amenity_tags = ['pub', 'bar', 'cafe', 'restaurant', 'pharmacy', 'bank', 'fast_food',
'food_court', 'ice_cream', 'library', 'music_school', 'school',
'language_school', 'ferry_terminal', 'clinic', 'doctors', 'hospital',
'pharmacy', 'veterinary', 'dentist', 'arts_centre', 'cinema',
'community_centre', 'casino', 'fountain', 'nightclub', 'studio', 'theatre',
                'dojo', 'internet_cafe', 'marketplace', 'post_office', 'townhall']
leisure_tags = ['adult_gaming_centre', 'amusement_arcade', 'beach_resort',
'fitness_centre', 'garden', 'ice_rink', 'sports_centre', 'water_park']
all_tags = shop_tags + amenity_tags + leisure_tags
fat_query_convex_hull = """
WITH clusters AS (
SELECT polygon.way AS geometry,
ST_ClusterDBSCAN(polygon.way, eps := 30, minpoints := 3) over () AS cid FROM planet_osm_polygon AS polygon
INNER JOIN planet_osm_point AS point
ON st_within(point.way, polygon.way)
WHERE (point.shop = ANY(ARRAY{})
OR point.amenity = ANY(ARRAY{})
OR point.leisure = ANY(ARRAY{}))
AND st_within(point.way, ({}))
AND polygon.building = 'yes'
)
SELECT cid, ST_ConvexHull(ST_Union(geometry)) AS convexhull
FROM clusters
WHERE cid IS NOT NULL
GROUP BY cid;
""".format(shop_tags, amenity_tags, leisure_tags, zurich_polygon_query)
poi_polygons_clustered = gpd.read_postgis(fat_query_convex_hull, conn, geom_col='convexhull')
n_clusters = len(poi_polygons_clustered.groupby('cid').cid.nunique())
poi_polygons_clustered.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=zurich_location, zoom_start=16, tiles="cartodbpositron")
folium.GeoJson(poi_polygons_clustered, style_function=init_style_function(n_clusters)).add_to(m)
m
```
## Get all nodes and polygons with all the tags (in Rapperswil)
```
query = """
(SELECT way AS geometry, name AS popup FROM planet_osm_polygon
WHERE (amenity = ANY(ARRAY{tags}) OR shop = ANY(ARRAY{tags}) OR leisure = ANY(ARRAY{tags}))
AND st_within(way, ({polygon})))
UNION
(SELECT polygon.way AS geometry, point.name AS popup FROM planet_osm_polygon AS polygon
INNER JOIN planet_osm_point AS point
ON st_within(point.way, polygon.way)
WHERE (point.amenity = ANY(ARRAY{tags}) OR point.shop = ANY(ARRAY{tags}) OR point.leisure = ANY(ARRAY{tags}))
AND st_within(point.way, ({polygon}))
AND polygon.building = 'yes')
""".format(**{'polygon': rapperswil_polygon_query, 'tags': str(all_tags)})
poi_polygons = gpd.read_postgis(query, conn, geom_col='geometry')
poi_polygons.crs = fiona.crs.from_epsg(3857)
m = folium.Map(location=rapperswil_location, zoom_start=17)
folium.GeoJson(poi_polygons).add_to(m)
m
```
<table width="100%">
<tr style="border-bottom:solid 2pt #009EE3">
<td class="header_buttons">
<a href="prepare_anaconda.zip" download><img src="../../images/icons/download.png" alt="biosignalsnotebooks | download button"></a>
</td>
<td class="header_buttons">
<a href="https://mybinder.org/v2/gh/biosignalsplux/biosignalsnotebooks/mybinder_complete?filepath=biosignalsnotebooks_environment%2Fcategories%2FInstall%2Fprepare_anaconda.dwipynb" target="_blank"><img src="../../images/icons/program.png" alt="biosignalsnotebooks | binder server" title="Be creative and test your solutions !"></a>
</td>
<td></td>
<td class="header_icons">
<a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png" alt="biosignalsnotebooks | home button"></a>
</td>
<td class="header_icons">
<a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png" alt="biosignalsnotebooks | contacts button"></a>
</td>
<td class="header_icons">
<a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png" alt="biosignalsnotebooks | github button"></a>
</td>
<td class="header_logo">
<img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo">
</td>
</tr>
</table>
<link rel="stylesheet" href="../../styles/theme_style.css">
<!--link rel="stylesheet" href="../../styles/header_style.css"-->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
<table width="100%">
<tr>
<td id="image_td" width="15%" class="header_image_color_13"><div id="image_img"
class="header_image_13"></div></td>
<td class="header_text"> Download, Install and Execute Anaconda </td>
</tr>
</table>
<div id="flex-container">
<div id="diff_level" class="flex-item">
<strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
<span class="fa fa-star"></span>
<span class="fa fa-star"></span>
<span class="fa fa-star"></span>
<span class="fa fa-star"></span>
</div>
<div id="tag" class="flex-item-tag">
<span id="tag_list">
<table id="tag_list_table">
<tr>
<td class="shield_left">Tags</td>
<td class="shield_right" id="tags">install☁jupyter☁notebook☁anaconda☁download</td>
</tr>
</table>
</span>
<!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
</div>
</div>
In every journey we always need to prepare our toolbox with the needed resources !
The same happens with <strong><span class="color1">biosignalsnotebooks</span></strong>, where the <strong><span class="color4">Jupyter Notebook</span></strong> environment is the most relevant application (it supports <strong><span class="color1">biosignalsnotebooks</span></strong>) for taking maximum advantage of your learning process.
The following sequence of instructions presents the operations that should be completed in order to have <strong><span class="color4">Jupyter Notebook</span></strong> ready to use and to open our <strong>ipynb</strong> files on a local server.
<table width="100%">
<tr>
<td style="text-align:left;font-size:12pt;border-top:dotted 2px #62C3EE">
<span class="color1">☌</span> The current <span class="color4"><strong>Jupyter Notebook</strong></span> is focused on a complete Python toolbox called <a href="https://www.anaconda.com/distribution/"><span class="color4"><strong>Anaconda <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></span></a>.
However, there is an alternative approach to get all things ready for starting our journey, which is described in <a href="../Install/prepare_jupyter.ipynb"><span class="color1"><strong>"Download, Install and Execute Jupyter Notebook Environment" <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></span></a>
</td>
</tr>
</table>
<hr>
<hr>
<p class="steps">1 - Access the <strong><span class="color4">Anaconda</span></strong> official page at <a href="https://www.anaconda.com/distribution/">https://www.anaconda.com/distribution/</a></p>
<img src="../../images/install/prepare_anaconda/anaconda_page.png">
<p class="steps">2 - Click on "Download" button, giving a first but strong step into our final objective</p>
<img src="../../images/install/prepare_anaconda/anaconda_download.gif">
<p class="steps">3 - Specify the operating system of your local machine</p>
<img src="../../images/install/prepare_anaconda/anaconda_download_os.gif">
<p class="steps">4 - Select the version of <span class="color1">Python</span> compiler to be included on <span class="color4">Anaconda</span></p>
It is strongly advisable that you choose version <strong>3.x</strong> to ensure that all functionalities of packages like <strong><span class="color1">biosignalsnotebooks</span></strong> are fully operational.
<img src="../../images/install/prepare_anaconda/anaconda_download_version.gif">
<p class="steps">5 - After defining the directory where the downloaded file will be stored, please, wait a few minutes for the end of transfer</p>
<span class="color13" style="font-size:30px">⚠</span>
The waiting time will depend on the quality of the Internet connection !
<p class="steps">6 - When download is finished navigate through your directory tree until reaching the folder where the downloaded file is located</p>
In our case the destination folder was <img src="../../images/install/prepare_anaconda/anaconda_download_location.png" style="display:inline;margin-top:0px">
<p class="steps">7 - Execute <span class="color4">Anaconda</span> installer file with a double-click</p>
<img src="../../images/install/prepare_anaconda/anaconda_download_installer.gif">
<p class="steps">8 - Follow the sequential instructions presented on the <span class="color4">Anaconda</span> installer</p>
<img src="../../images/install/prepare_anaconda/anaconda_download_install_steps.gif">
<p class="steps">9 - <span class="color4">Jupyter Notebook</span> environment is included on the previous installation. For starting your first Notebook execute <span class="color4">Jupyter Notebook</span></p>
Launch it from "Anaconda Navigator" or through a command window, as described in the following steps.
<p class="steps">9.1 - For executing <span class="color4">Jupyter Notebook</span> environment you should open a <strong>console</strong> (in your operating system).</p>
<i>If you are a Microsoft Windows user, just click on the Windows logo (bottom-left corner of the screen), type "cmd", and then press "Enter".</i>
<p class="steps">9.2 - Type <strong>"jupyter notebook"</strong> inside the opened console. A local <span class="color4"><strong>Jupyter Notebook</strong></span> server will be launched.</p>
<img src="../../images/install/prepare_anaconda/open_jupyter.gif">
<p class="steps">10 - Create a blank Notebook</p>
<p class="steps">10.1 - Now, you should navigate through your directories until reaching the folder where you want to create or open a Notebook (as demonstrated in the following video)</p>
<span class="color13" style="font-size:30px">⚠</span>
<p style="margin-top:0px">You should note that your folder hierarchy is unique, so, the steps followed in the next image, will depend on your folder organisation, being merely illustrative </p>
<img src="../../images/install/prepare_anaconda/create_notebook_part1.gif">
<p class="steps">10.2 - For creating a new Notebook, "New" button (top-right zone of Jupyter Notebook interface) should be pressed and <span class="color1"><strong>Python 3</strong></span> option selected.</p>
<i>A blank Notebook will arise and now you just need to be creative and expand your thoughts to others persons!!!</i>
<img src="../../images/install/prepare_anaconda/create_notebook_part2.gif">
This can be the start of something great. You now have all the software you need to create and develop interactive tutorials, combining Python with HTML!
<span class="color4"><strong>Anaconda</strong></span> contains lots of additional functionalities, namely <a href="https://anaconda.org/anaconda/spyder"><span class="color7"><strong>Spyder <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></span></a>, which is an intuitive Python editor for creating and testing your own scripts.
<strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
<hr>
<table width="100%">
<tr>
<td class="footer_logo">
<img src="../../images/ost_logo.png" alt="biosignalsnotebooks | project logo [footer]">
</td>
<td width="40%" style="text-align:left">
<a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
<br>
<a href="https://github.com/biosignalsplux/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
<br>
<a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/signal_samples.ipynb">☌ Signal Library</a>
</td>
<td width="40%" style="text-align:left">
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
<br>
<a href="https://www.biosignalsplux.com/notebooks/Categories/MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
</td>
</tr>
</table>
```
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
%%html
<script>
// AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
require(
['base/js/namespace', 'jquery'],
function(jupyter, $) {
$(jupyter.events).on("kernel_ready.Kernel", function () {
console.log("Auto-running all cells-below...");
jupyter.actions.call('jupyter-notebook:run-all-cells-below');
jupyter.actions.call('jupyter-notebook:save-notebook');
});
}
);
</script>
```
# Exploratory Data Analysis
by: Andreas Chandra \
date: 21/03/2021
```
import os
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from scipy.stats import norm
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
data_path = "../datasets"
files = list(filter(lambda x: x.endswith('.csv'), os.listdir(data_path)))
files
d_data = pd.read_csv(os.path.join(data_path, files[0]))
d_data.shape
d_data
d_data.dtypes
d_data.date = pd.to_datetime(d_data.date)
len(d_data.date)
# check whether there's non one-hour timeframe
checkNotOneHour = lambda i: (d_data.date[i+1]-d_data.date[i]).seconds/3600
notOneHour = [(i,d_data.date[i], checkNotOneHour(i)) for i in range(len(d_data.date)-1) if checkNotOneHour(i) != 1.0]
len(notOneHour)
pd.Series([difference[2] for difference in notOneHour]).value_counts()
# get duration of the data
print(d_data.date.min(), d_data.date.max())
print(d_data.date.max() - d_data.date.min())
plt.figure(figsize=(12,4))
fig = sns.lineplot(x = 'date', y='swh', data=d_data)
# fig.figure.savefig("../figures/timeframe.png")
d_data.groupby(d_data.date.dt.date).count()
d_data.groupby(d_data.date.dt.date).count().plot(figsize=(14,4))
plt.yticks(list(range(0,25, 2)))
# plt.savefig('../figures/data_point_per_day.png')
plt.show()
```
### Restructure Dataset
```
date_range = pd.date_range(d_data.date.min(), d_data.date.max(), freq='1H')
date_range
d_data_fulltime = pd.DataFrame(date_range, columns=['date'])
d_data_fulltime = d_data_fulltime.merge(d_data, how='left', on='date')
d_data_fulltime.head()
d_data_fulltime.isna().sum()
```
### Fill NaN values with interpolation
```
df_interpolate = d_data_fulltime.copy()
df_interpolate['swh'].interpolate(method='linear', inplace=True)
```
Alternatively, we can fill them using the average per day:
```
d_data_fulltime.isna().sum()
d_data_fulltime2 = d_data_fulltime.copy()
d_data_fulltime2.swh = d_data_fulltime.groupby(d_data_fulltime.date.dt.date)['swh'].transform(
lambda x: x.fillna(x.mean()))
d_data_fulltime2.isna().sum()
# but some missing values span a whole day, so we still need interpolation to handle those
d_data_fulltime2.isna().sum()
```
---
```
plt.figure(figsize=(12,4))
fig = sns.lineplot(x = 'date', y='swh', data=df_interpolate)
# fig.figure.savefig("../figures/timeframe.png")
# each year plot
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(16,9))
ax1.plot('date', 'swh', data=df_interpolate[df_interpolate.date.dt.year==2013])
ax1.set_title('Ocean Wave in 2013')
ax2.plot('date', 'swh', data=df_interpolate[df_interpolate.date.dt.year==2014])
ax2.set_title('Ocean Wave in 2014')
ax3.plot('date', 'swh', data=df_interpolate[df_interpolate.date.dt.year==2015])
ax3.set_title('Ocean Wave in 2015')
ax4.plot('date', 'swh', data=df_interpolate[df_interpolate.date.dt.year==2016])
ax4.set_title('Ocean Wave in 2016')
# Augmented Dickey-Fuller test to determine stationarity
data = df_interpolate['swh']
adf = adfuller(data)
print(f'Test Statistic: {adf[0]}')
print(f'p-value: {adf[1]}')
print('Critical Values:')
for key, value in adf[4].items():
print(f'\t{key}: {value}')
# p-value is very low, so we reject the null hypothesis
# which indicates the data is stationary
# distribution, seems a little bit right skewed, but we'll leave it
plt.figure(figsize=(10,5))
fig = sns.distplot(x=df_interpolate.swh, kde=True, fit=norm)
df_interpolate.swh.describe()
# seasonal decomposition
season = seasonal_decompose(df_interpolate.resample('1H', on='date').mean())
fig = season.plot()
fig.set_size_inches(16,8)
plt.show()
# seasonal decomposition
season = seasonal_decompose(df_interpolate.resample('1W', on='date').mean())
fig = season.plot()
fig.set_size_inches(16,8)
plt.show()
# cyclical pattern
fig,(ax1,ax2,ax3,ax4,ax5) = plt.subplots(5,1,figsize=(14,14))
ax1 = sns.boxplot(x=df_interpolate.date.dt.hour, y='swh', data=df_interpolate, ax=ax1)
ax1.set_title("By hour")
ax1.set_xlabel("hour")
ax2 = sns.boxplot(x=df_interpolate.date.dt.day, y='swh', data=df_interpolate, ax=ax2)
ax2.set_title("By day")
ax2.set_xlabel("day")
ax3 = sns.boxplot(x=df_interpolate.date.dt.week, y='swh', data=df_interpolate, ax=ax3)
ax3.set_title("By week")
ax3.set_xlabel("week")
ax4 = sns.boxplot(x=df_interpolate.date.dt.month, y='swh', data=df_interpolate, ax=ax4)
ax4.set_title("By month")
ax4.set_xlabel("month")
ax5 = sns.boxplot(x=df_interpolate.date.dt.year, y='swh', data=df_interpolate, ax=ax5)
ax5.set_title("By year")
ax5.set_xlabel("year")
plt.tight_layout()
plt.show()
```
Autocorrelation & partial autocorrelation give us an idea of how data points at different points in time are linearly related to one another as a function of their time difference; a quick calculation of the recommended number of lags for our series follows the rule of thumb below.\
\
Rule of Thumb to determine lags by [Rob J Hyndman](https://robjhyndman.com/hyndsight/ljung-box-test/):
- For non-seasonal time series, use `h = min(10,T/5)` where `h = lags`, `T = length of time series`
- For seasonal time series, use `h = min(2m, T/5)` where `h = lags`, `T = length of time series`, `m = period of seasonality`
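As a quick illustration (a sketch that assumes a daily seasonal period of m = 24 for this hourly series):
```python
T = len(df_interpolate)     # length of the time series
m = 24                      # assumed seasonal period: 24 hourly observations per day
print(min(10, T / 5))       # non-seasonal rule of thumb
print(min(2 * m, T / 5))    # seasonal rule of thumb
```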
```
# autocorrelation & partial autocorrelation
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(18, 4))
ax1 = plot_acf(df_interpolate.swh, lags=24*3, ax=ax1)
ax2 = plot_pacf(df_interpolate.swh, lags=24*3, ax=ax2)
# df_interpolate.to_csv(f"{data_path}/clean/{files[0]}", index=False)
```
# Semantic Polarisation
Newspaper bias seems to be a source of constant debate in the UK. Bias can of course come in many forms: in what news a newspaper covers, as well as how particular issues are framed. How a newspaper chooses to frame particular issues has been difficult to research in the past, because the tools we have had for studying text have often been too crude to pick up on subtle differences in the way words are used.
However, with the advent of effective word embedding representations through the word2vec algorithm, we can begin to study these subtle differences. The vector representation of word meaning should allow us to quantify how word usage changes over time and between corpora, which could be an important tool for understanding polarisation in language use.
In this notebook I will demonstrate how such algorithms can be used to study semantic polarisation in the news, by comparing the semantic change of the vocabulary used by two UK news outlets from opposite sides of the political spectrum: the Guardian (left-wing) and the Daily Mail (right-wing).

This approach requires us to choose two discrete time periods to compare. Because Brexit has been seen as a watershed moment in recent UK history, and one that has been said to magnify polarisation in the news, I will use the years leading up to the referendum (2013-2015) and the years from the referendum onwards (2016-2019) as the two discrete periods.
The approach can briefly be summarised through the following steps:
* Compute separate word vectors for each outlet in each discrete time period.
* Align the four word vector models onto a common space.
* For every word common to all four models, compute a vector that represents that word's movement from time period t to time period t+1.
* Look for words that have changed in opposite directions between the two outlets; these are assumed to be polarising.
## Words and Vectors
One of the key insights behind the Word2Vec algorithm was that we can *know a word by the company it keeps*. In other words, we can use the surrounding words in running text to infer the meaning of any given word. *Mikolov et al.* showed how a logistic regression trained to *'learn vector representations that are good at predicting nearby words'* could create word embeddings that capture semantic relationships between words, relationships that can be verified through simple arithmetic.

A common example is that analogies like *"man is to king as woman is to ...?"* can be solved through simple vector addition and subtraction.
For example, king - man + woman gives a vector close to the word 'queen'.

In other words, word vectors give each word in your corpus a mathematical representation such that words used in similar contexts end up with similar representations.
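As an aside, this analogy arithmetic is easy to reproduce with gensim and a pretrained embedding; a minimal sketch (the model name below is just an example, any reasonably large English embedding should behave similarly):
```
import gensim.downloader as api

# Download/load a pretrained embedding (example choice; downloaded on first run).
vectors = api.load("glove-wiki-gigaword-100")

# king - man + woman ~= queen
print(vectors.most_similar(positive=["king", "woman"], negative=["man"], topn=3))
```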
We measure the similarity of two vectors by taking their cosine similarity. Something interesting to note is that cosine similarity only measures the angle between the two vectors, i.e. which way they are pointing, not their length. The assumption is that meaning is captured only by the direction of the vector and not by its length. Some research suggests that the length of a vector captures a combination of word frequency and how coherently a word is used: a word used in a narrow set of contexts will get a long vector, whilst words typically used in a wide array of contexts, like stop words, will get shorter vectors (*Schakel & Wilson, 2015*).
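Concretely, cosine similarity is just the normalised dot product, so scaling a vector leaves the score unchanged; a minimal numpy sketch:
```
import numpy as np

def cosine_similarity(u, v):
    """Cosine of the angle between u and v; invariant to vector length."""
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

u = np.array([1.0, 2.0, 3.0])
print(cosine_similarity(u, 2 * u))                       # 1.0 - same direction, different length
print(cosine_similarity(u, np.array([-3.0, 1.0, 0.0])))  # close to 0 - nearly orthogonal
```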
# Alignment
However, a general problem with word embedding models is that they are not deterministic: every training run arrives at a slightly different solution, depending on random choices made while the model is built.
Therefore, in order to compare word vectors from different models, we need a way to put all our models into a common space. A couple of solutions have been proposed, but one very intuitive and simple approach is to find a transformation matrix from each model onto a common space:

For each word in our model, we use its vector to predict itself and its N nearest words in the space we are transforming it into. This is in essence a piecewise linear regression problem, where the predicted output is the word vectors in the new space.
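The simplest version of this idea can be sketched as a single least-squares transformation fitted over the shared vocabulary (the piecewise, nearest-neighbour variant described above refines this per word); the gensim attribute names in the commented usage are assumptions:
```
import numpy as np

def align_embeddings(source, target):
    """Fit W by least squares so that source @ W approximates target.

    source, target: (n_words, dim) matrices whose rows correspond to the same
    shared vocabulary, in the same order. Returns the aligned copy of source.
    """
    W, *_ = np.linalg.lstsq(source, target, rcond=None)
    return source @ W

# Hypothetical usage with two trained gensim models:
# shared = [w for w in model_a.wv.index_to_key if w in model_b.wv.key_to_index]
# A = np.stack([model_a.wv[w] for w in shared])
# B = np.stack([model_b.wv[w] for w in shared])
# A_aligned = align_embeddings(A, B)   # rows of A mapped into model_b's space
```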
## Direction of Semantic Change
This idea that we can represent the meaning of words through vectors means we can track how words change over time by using simple arithmetical operations on vectors.
In our example with two discrete points in time, we can create word embeddings for each of the two time periods and measure the semantic change by calculating how the mathematical representation of a particular word has changed from one period to the next.
This can be done by subtracting the vector of a word in *t* from the vector of that same word in *t+1*. This gives us a vector pointing from the tip of the word's vector in *t* to the tip of its vector in *t+1*.
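In code this is a single subtraction per word; a minimal sketch, assuming two already-aligned gensim models with hypothetical names `model_t` and `model_t1`:
```
import numpy as np

def change_vector(word, model_t, model_t1):
    """Vector pointing from a word's representation in t to its representation in t+1.

    Both models are assumed to have been aligned onto a common space first.
    """
    return np.asarray(model_t1.wv[word]) - np.asarray(model_t.wv[word])

# e.g. delta = change_vector("russia", guardian_2013_2015, guardian_2016_2019)
```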
```
# hide
import matplotlib.pyplot as plt
import numpy as np
# hide
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-1, 6)
plt.ylim(-1, 6)
plt.scatter(x = (1,4), y = (5,2))
plt.arrow(x = 0, y = 0, dx = 1, dy = 5, length_includes_head = True, head_width = 0.2, color = 'k')
plt.arrow(x = 0, y = 0, dx = 4, dy = 2, length_includes_head = True, head_width = 0.2, color = 'k')
plt.arrow(x = 1, y = 5, dx = 3, dy = -3, length_includes_head = True, head_width = 0.2, color = 'b', ls = ':')
plt.annotate('Movement in meaning from t to t+1', xy = (1.5,2.5), rotation = -40, size = 12)
plt.annotate('Word representation t', xy = (0,5.1))
plt.annotate('Word representation t+1', xy = (4,2.1))
plt.show()
```
## Semantic Polarisation
So we can track the semantic change of a word from one period to the next, and with the same intuition we can get at semantic polarisation. For example, if we calculate the direction of change of each word in both the Daily Mail and the Guardian from one time period to the next, we can also compare whether the words are changing in the same or in opposite directions.
The intuition is that words changing in the same direction represent a general shift in language, whereas a move in opposite directions should indicate that the two outlets are increasingly using that word in different contexts from one another, and could give an insight into polarisation of word usage.
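Putting the pieces together, a minimal sketch of this comparison: for every shared word, compute its change vector in each outlet and take the cosine similarity between the two; values near -1 flag candidate polarising words, values near +1 a shared drift. The model variable names are hypothetical.
```
import numpy as np

def cosine(u, v):
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

def polarisation_scores(shared_words, mail_t, mail_t1, guardian_t, guardian_t1):
    """Cosine between each word's change vector in the Daily Mail and in the Guardian."""
    scores = {}
    for w in shared_words:
        delta_mail = mail_t1.wv[w] - mail_t.wv[w]
        delta_guardian = guardian_t1.wv[w] - guardian_t.wv[w]
        scores[w] = cosine(delta_mail, delta_guardian)
    return scores

# Hypothetical usage: the most negative scores are the candidate polarising words.
# scores = polarisation_scores(shared, mail_13_15, mail_16_19, guardian_13_15, guardian_16_19)
# most_polarising = sorted(scores, key=scores.get)[:20]
```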
```
# hide
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-1, 5)
plt.ylim(-1, 5)
plt.scatter(x = (3,5), y = (1,3))
plt.arrow(x = 0, y = 0, dx = 3, dy = 1, length_includes_head = True, head_width = 0.2, color = 'g')
plt.arrow(x = 0, y = 0, dx = 5, dy = 3, length_includes_head = True, head_width = 0.2, color = 'b')
plt.annotate('Direction of change w1 Daily Mail', xy = (2,1.2), size = 12)
plt.annotate('Direction of change w1 Guardian', xy = (2.5, 3), size = 12)
plt.show()
# hide
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-5, 5)
plt.ylim(-1, 5)
plt.scatter(x = (3,-4), y = (1,3))
plt.arrow(x = 0, y = 0, dx = 3, dy = 1, length_includes_head = True, head_width = 0.2, color = 'g')
plt.arrow(x = 0, y = 0, dx = -4, dy = 3, length_includes_head = True, head_width = 0.2, color = 'b')
plt.annotate('Direction of change w1 Daily Mail', xy = (2,1.2), size = 12)
plt.annotate('Direction of change w1 Guardian', xy = (-4.75, 3.2), size = 12)
plt.show()
```
# Example Results
## Geopolitics
One word that has undergone a similar semantic change in both outlets is 'Russia'. Looking at the words that 'Russia' has moved towards, we find words associated with speculation around Russian interference in Brexit, like *investigation*, or words possibly associated with the Salisbury incident, like *murder*.
We can also note that 'Russia' has moved away from other countries as well as industries typically associated with Russia, like oil and gas.
```
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.title('Word: Russia')
plt.scatter(x = (0,3), y = (0,3))
plt.arrow(x = 0, y = 0, dx = 3, dy = 3, length_includes_head = True, head_width = 0.2, color = 'b')
plt.arrow(x = 0, y = 0, dx = -2, dy = -2, head_width = 0, color = 'b',ls = "--", alpha = .5)
plt.text(x = 0.5, y = 0.9, s = r'$\Delta Russia$ Guardian', fontsize=12, rotation = 43)
plt.text(x = -3, y = -3, s = "market oil\nenergy gas", fontsize=12)
plt.scatter(x = (0,3), y = (0,1))
plt.arrow(x = 0, y = 0, dx = 3, dy = 1, length_includes_head = True, head_width = 0.2, color = 'r')
plt.arrow(x = 0, y = 0, dx = -3, dy = -1, head_width = 0, color = 'r', ls = "--", alpha = .5)
plt.text(x = 0.5, y = -0.1, s = r'$\Delta Russia$ Daily Mail', fontsize=12, rotation = 15)
plt.text(x = -4.5, y = -1.2, s = "uk greece\njapan canada", fontsize=12)
plt.text(x = 3.5, y = -1, s = "raid\n\ninvestigate\n\nassault\n\narrest\n\ncrime\n\nscandal\n\nmurder\n\nallegation", fontsize=12)
plt.show()
```
One word that has undergone a dissimilar semantic change between the two outlets is 'Beijing'.
Whilst the Daily Mail seems to increasingly mention Beijing in the context of economic terms like *investment* and *supplier*, the Guardian is moving towards adjectives like *unfair* and *nice*.
You see a similar story in terms of what words 'Beijing' is moving away from in each of the outlets.
```
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.title('Word: Beijing')
plt.scatter(x = (0,3), y = (0,3))
plt.arrow(x = 0, y = 0, dx = 3, dy = 3, length_includes_head = True, head_width = 0.2, color = 'b')
plt.arrow(x = 0, y = 0, dx = -2, dy = -2, head_width = 0, color = 'b',ls = "--", alpha = .5)
plt.text(x = 0.5, y = 0.9, s = r'$\Delta Beijing$ Guardian', fontsize=12, rotation = 43)
plt.text(x = 3.5, y = 3, s = "unfair\n\nhonest\n\nnice", fontsize=12)
plt.text(x = -2.5, y = -3, s = "resume\n\npartnership", fontsize=12)
plt.scatter(x = (0,-3), y = (0,-1))
plt.arrow(x = 0, y = 0, dx = -3, dy = -1, length_includes_head = True, head_width = 0.2, color = 'r')
plt.arrow(x = 0, y = 0, dx = 3, dy = 1, head_width = 0, color = 'r', ls = "--", alpha = .5)
plt.text(x = -2.5, y = -0.5, s = r'$\Delta Beijing$ Daily Mail', fontsize=12, rotation = 15)
plt.text(x = -4.5, y = -3, s = "service\n\nexport\n\ninvestment\n\nsupplier", fontsize=12)
plt.text(x = 3.5, y = 0, s = "scared\n\nafraid\n\nunfair", fontsize=12)
plt.show()
```
This change suggests that there has been a marked difference in the way Beijing has been covered by the two outlets. We also see a difference when looking at the change of the word 'China' between the two outlets:
```
plt.figure(figsize = (10,8))
plt.grid(True)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.title('Word: China')
plt.scatter(x = (0,-3), y = (0,3))
plt.arrow(x = 0, y = 0, dx = -3, dy = 3, length_includes_head = True, head_width = 0.2, color = 'b')
plt.arrow(x = 0, y = 0, dx = 2, dy = -2, head_width = 0, color = 'b',ls = "--", alpha = .5)
plt.text(x = -2, y = 1, s = r'$\Delta China$ Guardian', fontsize=12, rotation = -43)
plt.text(x = -4.5, y = 2, s = "deserve\n\naccusation\n\ntake action\n\npursue", fontsize=12)
plt.scatter(x = (0,-3), y = (0,-1))
plt.arrow(x = 0, y = 0, dx = -3, dy = -1, length_includes_head = True, head_width = 0.2, color = 'r')
plt.arrow(x = 0, y = 0, dx = 3, dy = 1, head_width = 0, color = 'r', ls = "--", alpha = .5)
plt.text(x = -2.5, y = -0.5, s = r'$\Delta China$ Daily Mail', fontsize=12, rotation = 15)
plt.text(x = -4.5, y = -3, s = "ally\n\nleader\n\nadviser\n\nattacker", fontsize=12)
plt.show()
```
# Conclusions
Although the results shown above are fairly convincing, the approach seemed to work less well for words that are not geopolitical entities. There are a number of possible technical reasons for this:
* Relatively small amounts of training data: each model is trained on only about xx examples, which is not a lot for word vectors. Although there are no hard limits or even firm guidelines on sample sizes, most tend to agree that more data is better.
* The discrete time periods that were picked out are somewhat arbitrary.
* The alignment algorithm. In this particular implementation I used all common words to find a linear transformation from the original embedding space to the new common space. This implicitly assumes that all the words have a similar representation across models. Some researchers suggest training the transformation matrix only on words that are unlikely to differ much between models, like stopwords (). This may produce more pronounced differences.
Despite the approach needing some fine-tuning and experimentation, initial results suggest word vectors represent an interesting avenue for detecting changes in how language is used in the media.
# References
Azarbonyad, Hosein & Dehghani, Mostafa & Beelen, Kaspar & Arkut, Alexandra & Marx, Maarten & Kamps, Jaap. (2017). *Words are Malleable: Computing Semantic Shifts in Political and Media Discourse.* 1509-1518. 10.1145/3132847.3132878.
Kulkarni, Vivek & Al-Rfou, Rami & Perozzi, Bryan & Skiena, Steven. (2015). *Statistically Significant Detection of Linguistic Change.* 625-635. 10.1145/2736277.2741627.
Mikolov, Tomas & Sutskever, Ilya & Chen, Kai & Corrado, G.s & Dean, Jeffrey. (2013). *Distributed Representations of Words and Phrases and their Compositionality.* Advances in Neural Information Processing Systems. 26.
Schakel, Adriaan & Wilson, Benjamin. (2015). *Measuring Word Significance using Distributed Representations of Words.*
Yougov: https://yougov.co.uk/topics/politics/articles-reports/2017/03/07/how-left-or-right-wing-are-uks-newspapers