```
import qiskit as q
from qiskit.tools.visualization import plot_bloch_multivector
from qiskit.visualization import plot_histogram
from matplotlib import style
style.use("dark_background")
%matplotlib inline
# Creating instances for 2 different simulators
statevector_simulator = q.Aer.get_backend("statevector_simulator") # To plot the Bloch vector
qasm_sim = q.Aer.get_backend("qasm_simulator") # To get the distribution outputs
def do_job(circuit):
result = q.execute(circuit, backend = statevector_simulator).result()
statevec = result.get_statevector()
n_qubits = circuit.n_qubits
circuit.measure([i for i in range(n_qubits)], [i for i in range(len(circuit.clbits))])
qasm_job = q.execute(circuit, backend = qasm_sim, shots = 1024).result()
counts = qasm_job.get_counts()
return statevec, counts
circuit = q.QuantumCircuit(2, 2)
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
plot_histogram([counts], legend = ['output'])
# Collapses to a single value
# Now put qubit 1 into a superposition state
circuit = q.QuantumCircuit(2, 2)
circuit.h(1) # Hadamard gate puts qubit 1 to superposition
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
# The Hadamard puts qubit 1 into the |+> state, which lies along the x axis of the Bloch sphere
plot_histogram([counts], legend = ['output'])
# Next: entangle the two qubits with a Hadamard followed by a CNOT
circuit = q.QuantumCircuit(2, 2)
circuit.h(0) # Hadamard gate puts qubit 0 to superposition
circuit.cx(0,1) # cnot to qubit 1
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
# Vector collapses to the centre
# Change in one clearly affects the other
plot_histogram([counts], legend = ['output'])
# The two qubits are entangled, so '00' and '11' each show up ~50% of the time
# Now qubit 0 is put into superposition and qubit 1 is flipped with a NOT gate
circuit = q.QuantumCircuit(2, 2)
circuit.h(0) # Hadamard gate puts qubit 0 to superposition
circuit.x(1) # Pauli-X (NOT) gate flips qubit 1 to |1>
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
# Qubit 0 lies along the x axis due to the Hadamard
# Qubit 1 is flipped to |1> by the NOT gate
plot_histogram([counts], legend = ['output'] )
# 3 qubits architecture
circuit = q.QuantumCircuit(3, 3)
circuit.h(0) # Hadamard (qubit 0 in superposition)
circuit.h(1) # Hadamard (qubit 1 in superposition)
circuit.ccx(0, 1, 2) # control control not (qubit 0 & 1 controls qubit 2)
circuit.draw()
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
plot_histogram([counts], legend = ['output'])
# Qiskit orders the bitstring with qubit 0 rightmost (the label reads q2 q1 q0)
# so the 2nd bar '001' signifies q0=1, q1=q2=0
# Proving that below
# Probability of value being 1 for q1 and q0 is 50%
# Probability of value being 1 for q2 is 0.5 * 0.5 = 0.25 = 25%
# 75% times it must be a zero
# Proof
circuit = q.QuantumCircuit(3, 1)
circuit.h(0) # Hadamard (qubit 0 in superposition)
circuit.h(1) # Hadamard (qubit 1 in superposition)
circuit.ccx(0, 1, 2) # control control not (qubit 0 & 1 controls qubit 2)
circuit.measure([2], [0]) # measuring qubit2 and mapping it to a classical bit
circuit.draw()
qasm_job = q.execute(circuit, backend = qasm_sim, shots = 1024).result()
counts = qasm_job.get_counts()
plot_histogram([counts], legend = ['output'])
# Probability of getting a zero is ~75% and getting a one is ~25%
# Small deviations from 75/25 are sampling noise from the 1024 shots
# 3 qubits rotation
import math
circuit = q.QuantumCircuit(3, 3)
circuit.h(0) # Hadamard (qubit 0 in superposition)
circuit.h(1) # Hadamard (qubit 1 in superposition)
circuit.rx(math.pi/4, 2) # rotate qubit 2 by pi/4
statevec, counts = do_job(circuit)
plot_bloch_multivector(statevec)
plot_histogram([counts], legend = ['output'])
# Here you see all possible combinations; this is not quantum noise
# Checking out probability of 0 & 1 for qubit2
circuit = q.QuantumCircuit(3, 1)
circuit.h(0) # Hadamard (qubit 0 in superposition)
circuit.h(1) # Hadamard (qubit 1 in superposition)
circuit.rx(math.pi/4, 2) # rotate qubit 2 by pi/4 about the x axis
circuit.measure([2], [0]) # measuring qubit2 and mapping it to a classical bit
circuit.draw()
qasm_job = q.execute(circuit, backend = qasm_sim, shots = 1024).result()
counts = qasm_job.get_counts()
plot_histogram([counts], legend = ['output'])
# The rx(pi/4) rotation gives qubit 2 a probability of sin^2(pi/8) ~ 0.146 of reading 1
# List of gates : https://qiskit.org/documentation/apidoc/circuit.html#gates-and-instructions
#github link to source code of quantum gates : github.com/Qiskit/qiskit-terra/tree/master/qiskit/extensions/standard
```
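As a quick sanity check on the probability reasoning in the comments above, the exact outcome distribution can be read straight off the statevector: each probability is the squared magnitude of the corresponding amplitude. The sketch below assumes `statevec` still holds the result of the last `do_job` call on a 3-qubit circuit; the 1024-shot histograms are just sampling around these exact values.
```
import numpy as np

# Exact probabilities from the statevector: P(bitstring) = |amplitude|^2
# Assumes `statevec` comes from the last 3-qubit do_job(...) call above
probs = np.abs(np.asarray(statevec)) ** 2
for i, p in enumerate(probs):
    print(f"{i:03b}: {p:.3f}")  # bitstring printed as q2 q1 q0
```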
# X2Paddle Quick Start: Migrating from TensorFlow to PaddlePaddle
***About X2Paddle***: X2Paddle converts models trained with the Caffe/TensorFlow/ONNX/PyTorch deep learning frameworks into PaddlePaddle models.
***X2Paddle GitHub repository***: [https://github.com/PaddlePaddle/X2Paddle](https://github.com/PaddlePaddle/X2Paddle)
***[Note]*** Star the [X2Paddle](https://github.com/PaddlePaddle/X2Paddle) repository on GitHub and watch the project to stay up to date with its latest progress.
This tutorial shows how to migrate a TensorFlow inference model to the PaddlePaddle framework, using the TensorFlow version of [MobileNetV1](https://github.com/tensorflow/models/tree/master/research/slim) as a detailed example.
## Installation and Setup
### 1. Install X2Paddle
***Option 1 (recommended):***
```
! git clone https://github.com/PaddlePaddle/X2Paddle.git
! cd X2Paddle
! git checkout develop
! python setup.py install
```
***Option 2:***
```
! pip install x2paddle==1.0.1 --index https://pypi.Python.org/simple/
```
### 2. Install TensorFlow
```
! pip install tensorflow==1.14.0
```
### 3. Install PaddlePaddle
```
! pip install paddlepaddle==2.0.1
```
## Model Migration
### 1. Get the FrozenModel of MobileNetV1
X2Paddle only supports converting TensorFlow FrozenModels. If you have a plain checkpoint model, follow the official X2Paddle [documentation](https://github.com/PaddlePaddle/X2Paddle/blob/release-1.1/docs/user_guides/export_tf_model.md) to convert it to a FrozenModel first. The model provided in this example is already a FrozenModel, so no conversion is needed.
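For reference, the sketch below shows one way a plain TF 1.x checkpoint could be frozen into a pb file (not needed for this example). The checkpoint path and output node name are placeholders you would replace with your own graph's values.
```
import tensorflow as tf

# Hypothetical paths/node name -- replace with your own checkpoint and output node
CKPT = "model.ckpt"
OUTPUT_NODE = "MobilenetV1/Predictions/Reshape_1"

with tf.Session() as sess:
    saver = tf.train.import_meta_graph(CKPT + ".meta")
    saver.restore(sess, CKPT)
    # Bake the variables into constants so the graph becomes a FrozenModel
    frozen_graph = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, [OUTPUT_NODE])
    with tf.gfile.GFile("frozen_model.pb", "wb") as f:
        f.write(frozen_graph.SerializeToString())
```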
```
! wget http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz
! tar zxvf mobilenet_v1_0.25_128.tgz
```
### 2. Convert
The arguments to pass are:
> --framework (-f): the source framework; set to tensorflow here.
> --save_dir (-s): the directory where the converted model is saved.
> --model (-m): the TensorFlow pb model.
> --paddle_type (-pt): whether to generate dynamic-graph (dygraph) or static-graph (static) code; defaults to dygraph.
***Option 1:*** generate static-graph code and save it as a static-graph inference model
```
! x2paddle -f tensorflow -m ./mobilenet_v1_0.25_128_frozen.pb -s pd_model_static -pt static
```
***Option 2:*** generate dynamic-graph code and save it as a static-graph inference model
```
! x2paddle -f tensorflow -m ./mobilenet_v1_0.25_128_frozen.pb -s pd_model_dygraph -pt dygraph
```
## Using the PaddlePaddle Model
Run inference with the PaddlePaddle inference model produced by Option 1:
(1) Download the ImageNet class file
```
! wget https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt
```
(2) Run inference
```
# Build the input
import cv2
import numpy as np
img = cv2.imread("dog_tf.png").astype("float32") / 255.0
img = np.expand_dims(img, 0)
img -= 0.5
img *= 2.0
# Run inference
import paddle
import numpy as np
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
[prog, inputs, outputs] = paddle.static.load_inference_model(path_prefix="pd_model_static/inference_model",
executor=exe,
model_filename="model.pdmodel",
params_filename="model.pdiparams")
result = exe.run(prog, feed={inputs[0]: img}, fetch_list=outputs)
max_index = np.argmax(result[0])
with open('imagenet_classes.txt') as f:
classes = [line.strip() for line in f.readlines()]
print("The category of dog.jpg is: {}".format(classes[max_index]))
```
# Worksheet 1.2.0: Modules
<div class="alert alert-block alert-info">
This worksheet includes to-do markers where work needs to be completed. In some cases, this means that you'll need to add a line or two to an example. In other cases (such as the final exercise), you may need to solve an entire problem.
</div>
## Totally modular, bruh
To this point, we've relied on either "built-in" or self-written code to create and run our programs. However, the universe of Python is mighty, and there are many ways that we can augment our programs by bringing in _other_ functionality from what are called _modules_.
You may ask: "Why don't we just have all of this stuff from the beginning?" It's a good question.
Without delving into all sorts of discussions about application performance, "bloatware," and general difficulties with the idea of "what do we include," it's enough to say that we don't need every bit of functionality _all the time_. For example, we haven't had to use any random numbers to this point, right?
Today we will. To do so, we need to `import` the module by name. That's it:
```python
import random
```
The above is just Markdown, though, so you need to do it below.
### 1. `import` the `random` module below.
```
# TODO
```
## Using the module
OK. Now that we have that out of the way, we can _use it_. Doing so is similar to how we used `string`s last week; `random` contains a bunch of methods, and we use the `dot notation` syntax to access them.
| Method | Argument(s) |Effect |
|--------|-------------|-------|
|`.choice()`|Sequence or data structure to choose from| Chooses a random element from the argument(s) provided |
|`.randint()`|Two `int`egers |Selects a random value in the range of the two provided `int`egers |
|`.random()`|None | Generates a random number between `0.0` and `1.0` |
|`.sample()`|Sequence or data structure, "k" value (`int`) (number of items to choose)| Returns a random sample (no repeats) from the argument(s) provided |
|`.shuffle()`* |Sequence or data structure to "shuffle" | Rearranges the order of the argument(s) provided |
`*`: Note that this _will permanently alter the list_.
Here, when using the `random` module, we always refer to it as the `random` object, like so:
```
# Run this cell a few times to see how the values of the numbers change
print(f"A random number between 0.0 and 1.0: {random.random()}")
print(f"A random number from 1-100: {random.randint(1,100)}")
```
Many of these methods have somewhat specific applications -- namely something like `sample`, often used to quickly demonstrate what a random sampling of a given sequence or data structure might be. This might seem equivalent to `choice`, but the outcomes differ:
```
# On any given day, I call my cat these names -- as you know
cat_names = ["Ulysses", "The Boss", "Mr. U", "Snooze Magoo", "Mane Man", "The Bug", "Buddy Bug Man"]
# Again, run this cell multiple times to see how the values change
print(f"A random sample of any 3 names: {random.sample(cat_names,3)}")
print(f"A random choice of any name: {random.choice(cat_names)}")
```
The difference here is slight (one returns a `string`, the other a `list` of values), but the implications for any code are significant -- we have to handle what we get differently.
### 2. Generate random numbers between the following ranges:
`print` the result of each operation.
```
# TODO: Generate number between 10 - 100
# TODO: Generate number between 3 - 76
# TODO: Generate number between 20 - 80
```
### 3. `shuffle` the `cat_names` `list`
```
# Observe how the list changes
print(f"Before shuffle: {cat_names}")
# TODO: Shuffle list
print(f"After shuffle: {cat_names}")
```
## `datetime`
Usually when we think about dates and times, they're essentially `string`s in our minds -- take today for instance: we'd normally think about it as "Wednesday, the 27<sup>th</sup> of January, 2021." However, many of us have seen the math tricks which somehow make dates _computable_. Many of us don't want to occupy our time recalling dates in the past or forecasting what day of the week 27<sup>th</sup> of January, 2121 falls on -- so, we have a `module` that can help us with that or, even, tell us more about the _current week_.
To acquire these powers -- abilities which we _don't always need_ (hence their place in a `module`) -- we run into another special issue: there are many parts to the `datetime` `module`. We could import them all (bringing the entirety of `datetime` into our program), or we could request only the portions we need -- say, the `datetime` and `date` objects, plus `timedelta` (the ability to do date "math"). To do this, we would:
```python
from datetime import datetime
from datetime import date
from datetime import timedelta
```
Perform these `import`s in the cell below.
```
# TODO
```
`module`s can work differently based on how they're made, and `datetime` is a good example of this. While it still follows our "rules," the rabbit hole goes a bit deeper with this one. For example, `module`s can create `objects` which have various `properties`, such as the date and time _right now_:
```
today = datetime.now()
today # <-- I do this just to show what the object _looks like_
```
The above creates a `datetime` object representing _right now_. It has the following properties:
* year
* month
* day
* hour
* minute
* ...
You get the point. But how do we access these? They're not quite `method`s -- I do refer to them as `properties` -- so they have a bit of a different syntax which loses the `()`:
```
print(today.year)
print(today.month)
print(today.day)
```
Because a `property` doesn't exactly have an `argument` (how could you argue with today's year, for example?) there are no `parameters` or `arguments`. Instead, we're actually calling a variable directly from `datetime`, a behavior which we'll see when we talk more about making `object`s.
Note that we still follow _dot notation_:
$$ object.property $$
format.
That doesn't mean we don't have `method`s available to us. Here, I'll instead focus on a particular group and demonstrate how to use them. Then, I'll ask you to do some date math below.
### All day
By default, Python represents days of the week by a `0-indexed` integer, meaning that:
|`integer`| Day |
|---------|-----|
|0 | Monday|
|1 | Tuesday|
|2 | Wednesday|
|3 | Thursday|
|4 | Friday|
|5 | Saturday|
|6 | Sunday |
We'd use `datetime.now()` to establish the basic data, which then makes `.weekday()` available to us:
```
today = datetime.now()
print(today.weekday())
```
If we wanted to start being _truly "Pythonic"_ about it, we would "chain" them together:
```
day_of_week = datetime.now().weekday()
print(day_of_week)
```
If we revisit `string`s for a bit, we can see more of what I mean:
```
example = "Today is a day of the week like any other."
print(example.upper().replace(".","").split()) # <-- I mean...why? But, everything worth doing is worth overdoing.
```
Of course, this is a matter of comfort. I bring it up here because you'll see steps combined more often than not when reading others' code.
And, I digress -- back to `datetime`.
Having days as numbers is great when we want to do math, like find out the date for last Monday, regardless of where we are in the week:
```
past_monday = today - timedelta(today.weekday())
print(past_monday)
```
(This works because Monday is `0`; subtracting today's weekday number -- `2` -- worth of days lands us back on Monday.)
We invoke the `timedelta` method directly from our `from datetime` import to do the "delta" (or "difference") in times. We could even _add_ time by using an integer to represent `7` days from now.
```
next_monday = past_monday + timedelta(7)
print(next_monday)
```
And that's great. But, what's not so great is that format. We have the ability to do a bit better using some formatting specific to `datetime` through the `strftime` method (or "string to formatted time"):
```
print(past_monday.strftime("%A, %d %B, %Y"))
print(next_monday.strftime("%A, %d %B, %Y"))
```
As with anything specific to a module (such as expected arguments), this comes from reading into _how_ Python implements their dates. Here's a key to the above:
|Symbol | Purpose |
|-------|---------|
|`%A` | Full text day of week |
|`%d` | Two digit day |
|`%B` | Full text month |
|`%Y` | Four digit year |
(A fuller table is [located here](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes).)
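For instance, a quick mix of codes beyond the four above -- `%a` and `%b` give the abbreviated day and month, `%y` a two-digit year, and `%H:%M` the time:
```python
print(today.strftime("%a, %d %b %y"))   # e.g. Wed, 27 Jan 21
print(today.strftime("%A at %H:%M"))    # e.g. Wednesday at 14:05
```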
### 4. Calculate (and `print`) the following dates using today as a basis
* Last Tuesday
* Next Friday
* One year from now
* 21 years ago
* 100 years into the future
`print` each using `strftime` -- you might see what different combinations (uppercase/lowercase formatting symbols) do -- consult the table linked above.
```
# TODO
```
## Finishing this activity
Challenge yourself with the [final activity](f1_week-1-worksheet-agenda.md).
## Background
The aim of this analysis is to see what the data shows about the following questions:
1. From 2013 to 2018, how many people and businesses paid taxes?
2. How many male and female taxpayers are there?
3. In which year did the government raise more tax and in which year did it collect less tax?
4. Which province of Pakistan has the highest number of taxpayers?
5. Which city in Pakistan has the highest number of taxpayers?
6. In which year did Pakistan's government earn billions of rupees in revenue?
7. A comparison of individual taxpayers and businesses.
## Summary
- The data covers 2013 to 2018.
- The data obtained from **Kaggle** does not fully meet the criteria for analysis.
- The data reveals that:
  - i) Individuals account for ***42%*** of taxpayers, while businesses account for **58%**.
  - ii) Individuals made up the majority of taxpayers from *2013* to *2016*, but businesses made up the majority from *2017* to *2018*.
  - iii) Individual taxpayers have been decreasing since *2016*, or data is unavailable.
  - iv) According to the numbers, the city of "**Dadu**" is Pakistan's highest taxpayer.
  - v) According to the data, ***85.5%*** of taxpayers are **Male** and ***14.5%*** are **Female**.
  - vi) The province of Punjab has the highest percentage of taxpayers (**49.3%**), followed by Sindh (**36.4%**); Gilgit has the lowest share at **0.1%**.
  - vii) The growth rate of tax revenue is generally increasing, but in *2015* it fell to **9.88%**; in the following year, *2016*, it rose to **48.08%**, the highest in this period.
The dataset can be downloaded from here: https://drive.google.com/file/d/1nLHJFxVIilwaLSzOjz88Pd-UfyIbEUWB/view?usp=sharing
```
import math
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from collections import Counter
import folium
from folium.plugins import MarkerCluster
import warnings
warnings.filterwarnings("ignore")
```
Import all the csv files
```
df_2013 = pd.read_csv('2013.csv', encoding = "ISO-8859-1")
df_2014 = pd.read_csv('2014.csv', encoding = "ISO-8859-1")
df_2015 = pd.read_csv('2015.csv', encoding = "ISO-8859-1")
df_2016_1 = pd.read_csv('2016(1).csv', encoding = "ISO-8859-1")
df_2016_2 = pd.read_csv('2016(2).csv', encoding = "ISO-8859-1")
df_2017_1 = pd.read_csv('2017(1).csv', encoding = "ISO-8859-1")
df_2017_2 = pd.read_csv('2017(2).csv', encoding = "ISO-8859-1")
df_2018_1 = pd.read_csv('2018(1).csv', encoding = "ISO-8859-1")
df_2018_2 = pd.read_csv('2018(2).csv', encoding = "ISO-8859-1")
df_2018_3 = pd.read_csv('2018(3).csv', encoding = "ISO-8859-1")
# Save the shape of each year's dataframe(s) in a dict
def shape_of_dataframes(years, years_df):
dict = {}
lst_years = list(map(lambda yr:yr[:4],years))
counter_yr = Counter(lst_years)
count = 0
for k, v in counter_yr.items():
if v > 1:
df = pd.concat(years_df[count:count+v])
rows, cols = df.shape
dict[k] = [rows,cols,v]
count+=v
else:
df = years_df[count]
rows, cols = df.shape
dict[k] = [rows,cols,v]
count+=1
return dict
```
# EDA
```
# Rename the column names of all datasets
def renameColumns(years_df):
for df in years_df:
df.rename(columns = {df.columns[0]:'Name', df.columns[1]:'NTN', df.columns[2]:'TaxPaid'}, inplace = True)
def cleaningPart1(years_df):
for df in years_df:
df['NTN'] = df['NTN'].replace(r"[^A-Za-z0-9 ]+", "", regex=True)
df["NTN"].fillna("0", inplace = True)
df['TaxPaid'] = df['TaxPaid'].replace(r"[^0-9\d\.]+", "", regex=True)
df['TaxPaid'] = pd.to_numeric(df['TaxPaid'], errors='coerce')
df["TaxPaid"].replace([np.inf, -np.inf], np.nan)
df.drop(df[df["TaxPaid"]<1].index, inplace = True)
df.drop(df[df["TaxPaid"].isnull()].index, inplace = True)
df['Name'] = df['Name'].replace(r"[^A-Za-z0-9 ]+", "", regex=True)
df['Name'].fillna("Unknown", inplace = True)
# Some rows have Name and NTN swapped; swap them back
index = df.loc[df['Name'].str.isdigit()].index
for idx in index:
df.loc[idx,['Name','NTN']] = df.loc[idx,['NTN','Name']].values
df['NTN'] = df['NTN'].replace(r"[^0-9]+", "", regex=True)
df["NTN"] = df["NTN"].astype(str)
#Convert 8 digit NTN into 7 digit
index = df[df["NTN"].str.len()==8].index
df['NTN'][index] = df["NTN"][index].str[:-1]
#Drop NTN Outliers
index = df[(df["NTN"].str.len()==9) | (df["NTN"].str.len()==11) | (df["NTN"].str.len()==12)].index
df.drop(index, inplace = True)
def taxRecordsPerYear(years_dict,years_df):
dict = {}
count = 0
for key,val in years_dict.items():
if(val[2]>1):
v = val[2]
df = pd.concat(years_df[count:count+v])
dict[key] = sum(df["TaxPaid"])
count+=v
else:
df = years_df[count]
dict[key] = sum(df["TaxPaid"])
count+=1
return dict
def millify_by_dict(tax_dict):
dict = {}
for key,val in tax_dict.items():
millnames = ['','Thousand','Million','Billion','Trillion']
n = float(val)
millidx = max(0,min(len(millnames)-1,
int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
m = round(n / 10**(3 * millidx))
dict[key] = [m, millnames[millidx]]
return dict
def millify_by_records(tax_record):
millnames = ['','K','M','B','T']
n = float(tax_record)
millidx = max(0,min(len(millnames)-1,
int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
m = round(n / 10**(3 * millidx))
return str(m)+str(millnames[millidx])
def adding_column_year(years,years_df):
count = 0
for df in years_df:
df["years"] = years[count][:4]
count+=1
def detect_outliers(dataframe):
Q1 = dataframe['TaxPaid'].quantile(0.25)
Q3 = dataframe['TaxPaid'].quantile(0.75)
IQR = Q3 - Q1
minimum = Q1 - 1.5 * IQR
maximum = Q3 + 1.5 * IQR
outliers = (dataframe['TaxPaid'] < minimum) | (dataframe['TaxPaid'] > maximum)
dataframe['TaxPaid'].loc[dataframe['TaxPaid'] < minimum] = dataframe['TaxPaid'].loc[~outliers].min()
dataframe['TaxPaid'].loc[dataframe['TaxPaid'] > maximum] = dataframe['TaxPaid'].loc[~outliers].max()
years_df = [df_2013, df_2014, df_2015, df_2016_1, df_2016_2, df_2017_1, df_2017_2, df_2018_1, df_2018_2, df_2018_3]
years = ["2013", "2014", "2015", "2016_1", "2016_2", "2017_1", "2017_2", "2018_1", "2018_2", "2018_3"]
years_dict = shape_of_dataframes(years, years_df)
years_dict
renameColumns(years_df)
cleaningPart1(years_df)
tax_dict = taxRecordsPerYear(years_dict,years_df)
millify_by_dict(tax_dict)
adding_column_year(years,years_df)
df = pd.concat(years_df)
df_other = df.loc[(df["NTN"].str.len()==1)]
df_companies = df.loc[(df["NTN"].str.len()==7) | (df["NTN"].str.len()==10)]
df_individuals = df.loc[df["NTN"].str.len()==13]
detect_outliers(df_companies)
detect_outliers(df_individuals)
df_individuals_max = df_individuals["TaxPaid"].max()
df_companies_max = df_companies["TaxPaid"].max()
df_other["Category"] = np.NAN
for index, row in df_other.iterrows():
if row['TaxPaid'] <= df_individuals_max:
df_other.at[index, 'Category'] = 1
elif (row['TaxPaid'] > df_individuals_max) and (row['TaxPaid'] <= df_companies_max):
df_other.at[index, 'Category'] = 0
df_other['Category'].dropna(inplace=True)
df_companies["Category"] = 0
df_individuals["Category"] = 1
df = pd.concat([df_other,df_companies,df_individuals])
df_companies = df.loc[df["Category"] == 0]
df_individuals = df.loc[df["Category"] == 1]
df_companies.to_csv("df_companies.csv",index=False)
df_individuals.to_csv("df_individuals.csv",index=False)
```
# Visualization
```
df_companies = pd.read_csv('df_companies.csv')
df_individuals = pd.read_csv('df_individuals.csv')
df = pd.concat([df_companies,df_individuals])
df_yt = df[['years','TaxPaid']]
def taxes_per_year():
years = np.sort(df['years'].unique())
co_tax = []
indiv_tax = []
for yr in years:
co_yr = df_companies[df_companies["years"] == yr]
co_yr_sum = round(co_yr['TaxPaid'].sum())
indiv_yr = df_individuals[df_individuals["years"] == yr]
indiv_yr_sum = round(indiv_yr['TaxPaid'].sum())
if co_yr_sum > 0:
co_tax.append(co_yr_sum)
else:
co_tax.append(0)
if indiv_yr_sum > 0:
indiv_tax.append(indiv_yr_sum)
else:
indiv_tax.append(0)
return years, co_tax, indiv_tax
```
Graph 1 depicts the amount of money raised by the Pakistani government in billions of rupees.
```
labels, companies, individuals = taxes_per_year()
xs = np.arange(len(labels))
ys = np.array(companies) + np.array(individuals)
fig, ax = plt.subplots(figsize=(16, 8), sharey=True)
ax.plot(xs,ys,'o-', color='Brown')
previous_revenue = 0
for x,y in zip(xs,ys):
if(previous_revenue != 0 ):
growth = round((y - previous_revenue) / previous_revenue * 100, 2)
else:
growth = 0
ax.annotate("Rs: " + millify_by_records(y) + " Growth: " + str(growth) + "%",
(x,y),
textcoords="offset points",
xytext=(0,10),
ha='center')
previous_revenue = y
ax.set_xticks(xs)
ax.set_xticklabels(labels)
ax.legend();
plt.show();
```
The year-over-year growth rate of tax revenue is depicted in graph 2.
```
labels, companies, individuals = taxes_per_year()
x = np.arange(len(labels))
width = 0.35
y = np.array(companies) + np.array(individuals)
previous_revenue = 0
yg = []
for val in y:
if previous_revenue != 0:
growth = round((val - previous_revenue) / previous_revenue * 100, 2)
yg.append(growth)
else:
yg.append(0)
previous_revenue = val
fig, ax = plt.subplots(figsize=(16, 8), sharey=True)
bar = ax.bar(x - width/2, yg, width, label='Growth', color='Brown')
ax.set_ylabel('Tax')
ax.set_title('Revenue Growth')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
for b in bar:
height = b.get_height()
label_x_pos = b.get_x() + b.get_width() / 2
ax.text(label_x_pos, height, s=(str(height)+"%"), ha='center', va='bottom')
plt.show()
```
The split of taxes paid to the government by individuals and businesses is shown in graph 3.
```
companies_tax = round(sum(df_companies['TaxPaid']))
individuals_tax = round(sum(df_individuals['TaxPaid']))
labels = 'Company', 'Individuals'
sizes = [companies_tax,individuals_tax]
explode = (0, 0.1)
fig, ax = plt.subplots()
ax.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax.axis('equal')
fig.set_size_inches(10, 6)
plt.show()
```
Individuals and businesses are compared in graph 4.
```
labels, companies, individuals = taxes_per_year()
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots(1,2, figsize=(16, 6), sharey=True)
co_bar = ax[0].bar(x - width/2, companies, width, label='Companies')
indiv_bar = ax[0].bar(x + width/2, individuals, width, label='Individuals')
ax[0].set_ylabel('Tax')
ax[0].set_title('Company and Individuals Tax Per Year')
ax[0].set_xticks(x)
ax[0].set_xticklabels(labels)
ax[0].legend()
for bar in co_bar:
height = bar.get_height()
label_x_pos = bar.get_x() + bar.get_width() / 2
ax[0].text(label_x_pos, height, s=millify_by_records(height), ha='center', va='bottom')
for bar in indiv_bar:
height = bar.get_height()
label_x_pos = bar.get_x() + bar.get_width() / 2
ax[0].text(label_x_pos, height, s=millify_by_records(height), ha='center', va='bottom')
ax[1].plot(x - width/2, companies,'o-', width, label='Companies', color='Blue')
ax[1].plot(x + width/2, individuals,'o-', width, label='Individuals', color='Orange')
for x_val,y_val in zip(x,companies):
ax[1].annotate(millify_by_records(y_val),
(x_val,y_val),
textcoords="offset points",
xytext=(0,10),
ha='center')
for x_val,y_val in zip(x,individuals):
ax[1].annotate(millify_by_records(y_val),
(x_val,y_val),
textcoords="offset points",
xytext=(0,10),
ha='center')
ax[1].set_ylabel('Tax')
ax[1].set_title('Company and Individuals Tax Per Year')
ax[1].set_xticks(x)
ax[1].set_xticklabels(labels)
ax[1].legend()
plt.show()
df_individuals['Province'] = df_individuals['NTN'].astype(str).str[0]
df_individuals['City'] = df_individuals['NTN'].astype(str).str[1]
df_individuals['City'].fillna(0, inplace=True)
df_individuals.loc[pd.to_numeric(df_individuals['NTN'].astype(str).str[-1])%2==0, 'Gender'] = 0
df_individuals.loc[pd.to_numeric(df_individuals['NTN'].astype(str).str[-1])%2==1, 'Gender'] = 1
def gender_pay_tax_per_year(df):
years = np.sort(df['years'].unique())
male = []
female = []
for yr in years:
male_yr = len(df[(df['years'] == yr) & (df['Gender'] == 1)])
female_yr = len(df[(df['years'] == yr) & (df['Gender'] == 0)])
if male_yr > 0:
male.append(male_yr)
else:
male.append(0)
if female_yr > 0:
female.append(female_yr)
else:
female.append(0)
return years, male, female
```
Male and female tax payers are depicted in graph 5.
```
male = len(df_individuals.loc[df_individuals['Gender']==1])
female = len(df_individuals.loc[df_individuals['Gender']==0])
labels = 'Male', 'Female'
sizes = [male, female]
explode = (0, 0.1)
fig, ax = plt.subplots()
ax.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90, colors = ['Green', 'Red'])
ax.axis('equal')
fig.set_size_inches(10, 6)
plt.show()
labels, male, female = gender_pay_tax_per_year(df_individuals)
x = np.arange(len(labels))
width = 0.35
fig, ax = plt.subplots(1,2, figsize=(16, 6), sharey=True)
male_bar = ax[0].bar(x - width/2, male, width, label='Male', color='Green')
female_bar = ax[0].bar(x + width/2, female, width, label='Female', color='Red')
ax[0].set_ylabel('Tax')
ax[0].set_title('Male and Female Pay Tax Per Year')
ax[0].set_xticks(x)
ax[0].set_xticklabels(labels)
ax[0].legend()
for bar in male_bar:
height = bar.get_height()
label_x_pos = bar.get_x() + bar.get_width() / 2
ax[0].text(label_x_pos, height, s=millify_by_records(height), ha='center', va='bottom')
for bar in female_bar:
height = bar.get_height()
label_x_pos = bar.get_x() + bar.get_width() / 2
ax[0].text(label_x_pos, height, s=millify_by_records(height), ha='center', va='bottom')
ax[1].plot(x - width/2, male,'o-', width, label='Male', color='Green')
ax[1].plot(x + width/2, female,'o-', width, label='Female', color='Red')
for x_val,y_val in zip(x, male):
ax[1].annotate(millify_by_records(y_val),
(x_val,y_val),
textcoords="offset points",
xytext=(0,10),
ha='center')
for x_val,y_val in zip(x, female):
ax[1].annotate(millify_by_records(y_val),
(x_val,y_val),
textcoords="offset points",
xytext=(0,10),
ha='center')
ax[1].set_ylabel('Tax')
ax[1].set_title('Male and Female Pay Tax Per Year')
ax[1].set_xticks(x)
ax[1].set_xticklabels(labels)
ax[1].legend()
plt.show()
df_cities = pd.read_csv('df_cities.csv')
df_individuals[['Province', 'City']] = df_individuals[['Province', 'City']].apply(pd.to_numeric)
df_individuals['Province'].loc[(df_individuals['Province']==0) | (df_individuals['Province']==8) | (df_individuals['Province']==9)] = np.NAN
df_individuals['City'].loc[df_individuals['City']==0] = np.NAN
df_individuals['Province'].bfill(axis ='rows', inplace=True)
df_individuals['Province'].ffill(axis ='rows', inplace=True)
df_individuals['City'].bfill(axis ='rows', inplace=True)
df_individuals['City'].ffill(axis ='rows', inplace=True)
df_indiv_locations = pd.merge(df_individuals, df_cities, on=['Province','City'], how='left')
```
Graph 6 depicts the amount of government tax collected by city.
```
sr = df_indiv_locations.groupby('CityName')['TaxPaid'].agg('sum')
dict1 = sr.to_dict()
fig = plt.figure(figsize=(12,6))
ax = fig.add_axes([0,0,1,1])
k = dict1.keys()
v = dict1.values()
ax.bar(k,v)
plt.xticks(rotation='vertical')
plt.show()
```
Graph 7 depicts the amount of government tax collected by province.
```
sr = df_indiv_locations.groupby('Province')['TaxPaid'].agg('sum')
dict1 = sr.to_dict()
k = dict1.keys()
v = dict1.values()
labels = 'Balochistan', 'KPK', 'Punjab', 'Sindh', 'Islamabad', 'AJK', 'Gilgit'
sizes = v
explode = (0, 0, 0, 0, 0, 0, 0)
fig, ax = plt.subplots()
ax.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax.axis('equal')
fig.set_size_inches(16,10)
plt.show()
df_pk = pd.read_csv('pakistan.csv')
df_pk.drop(['id'], axis=1, inplace=True)
df_pk.rename(columns = {df_pk.columns[0]:'CityName', df_pk.columns[1]:'Latitude', df_pk.columns[2]:'Longitude'}, inplace = True)
df_indiv_locations = pd.merge(df_indiv_locations, df_pk, on=['CityName'], how='left')
df_city = df_indiv_locations.drop_duplicates(subset ="CityName")
```
#### Map
```
df1 = df_city.dropna()
locations = df1[['Latitude', 'Longitude']]
locationlist = locations.values.tolist()
map1 = folium.Map(location=[30.3753, 69.3451], zoom_start=5)
marker_cluster = MarkerCluster().add_to(map1)
for point in range(0, len(locationlist)):
folium.Marker(locationlist[point], popup=df1['CityName'].iloc[point]).add_to(marker_cluster)
map1
```
# Creating PictoBERT
This notebook creates the basis of the PictoBERT models by instantiating BERT and changing its embeddings layer.
```
!pip install transformers
```
## ARES embeddings
ARES embeddings have two sides: one extracted from contextualized examples and another from gloss definitions, both produced using BERT. We trained BERT using each side as input embeddings, so these are our model variants: 1) contextualized and 2) gloss-based.
The vectors can be downloaded directly from the [ARES website](http://sensembert.org/) or by executing the following cell
```
!gdown https://drive.google.com/uc?id=1TU9Ar85Cs61owjy3GDsxPDaerYYNdIkl
!gdown https://drive.google.com/uc?id=18ljLH7_z5CrcoRXNU6hOJbgxrZlhaRq9
```
The cell above downloads the two ARES sides separately in binary format. They can be loaded using gensim.
```
from gensim.models import KeyedVectors
ares_contextual = KeyedVectors.load_word2vec_format("/content/ares_1024.bin", binary=True)
ares_gloss = KeyedVectors.load_word2vec_format("/content/ares_1024_gloss.bin", binary=True)
```
## Load Tokenizer
It is necessary to load our trained tokenizer and its vocabulary in order to update BERT.
```
!gdown https://drive.google.com/uc?id=1-2g-GCxjBwESqDn3JByAJABU9Dkuqy0m
TOKENIZER_PATH = "./childes_all_new.json" # you can change this path to use your custom tokenizer
from transformers import PreTrainedTokenizerFast
loaded_tokenizer = PreTrainedTokenizerFast(tokenizer_file=TOKENIZER_PATH)
loaded_tokenizer.pad_token = "[PAD]"
loaded_tokenizer.sep_token = "[SEP]"
loaded_tokenizer.mask_token = "[MASK]"
loaded_tokenizer.cls_token = "[CLS]"
loaded_tokenizer.unk_token = "[UNK]"
```
## Load BERT
PictoBERT extends the original BERT weights, so we load BERT-large to use as the base model.
```
from transformers import BertForMaskedLM, BertTokenizer
pretrained_w = 'bert-large-uncased'
tokenizer_bert = BertTokenizer.from_pretrained(pretrained_w)
base_model = BertForMaskedLM.from_pretrained(pretrained_w)
```
Let's use the `copy` library to make deep copies of BERT, so the two variants do not share the same underlying modules:
```
import copy
# deepcopy gives each variant its own parameters; a shallow copy.copy would
# leave both models pointing at the same embedding submodules
contextualized_model = copy.deepcopy(base_model)
gloss_based_model = copy.deepcopy(base_model)
```
## Embeddings Update
The procedure for embeddings update is based on Algorithm 1.

Let's calculate the mean vector of each ARES side,
```
import torch
ares_contextual_mean = torch.tensor(ares_contextual.vectors).mean(0)
ares_gloss_mean = torch.tensor(ares_gloss.vectors).mean(0)
```
Obtain the BERT old embeddings and vocabulary to use as a basis,
```
old_wgts = base_model.get_input_embeddings().weight.clone().detach()
old_vocab = tokenizer_bert.get_vocab()
```
Create the matrix that will receive the new embeddings,
```
new_vocab_size = len(loaded_tokenizer)
new_vocab = loaded_tokenizer.get_vocab()
new_wgts_contextual = old_wgts.new_zeros(new_vocab_size,old_wgts.size(1))
new_wgts_gloss = old_wgts.new_zeros(new_vocab_size,old_wgts.size(1))
new_wgts_contextual.size()
```
Populate the matrices,
```
from torch import tensor
for w,idx_new in new_vocab.items():
idx_old = old_vocab.get(w, -1)
if idx_old>=0:
new_wgts_contextual[idx_new] = old_wgts[idx_old]
new_wgts_gloss[idx_new] = old_wgts[idx_old]
else:
if w in ares_contextual:
new_wgts_contextual[idx_new] = tensor(ares_contextual[w])
new_wgts_gloss[idx_new] = tensor(ares_gloss[w])
else:
new_wgts_contextual[idx_new] = ares_contextual_mean
new_wgts_gloss[idx_new] = ares_gloss_mean
```
And update the models' embeddings layers.
```
from torch import nn
new_wte_contextual = nn.Embedding(new_vocab_size,old_wgts.size(1))
new_wte_gloss = nn.Embedding(new_vocab_size,old_wgts.size(1))
new_wte_contextual.weight.data.normal_(mean=0,std=base_model.config.initializer_range)
new_wte_contextual.weight.data = new_wgts_contextual
new_wte_gloss.weight.data.normal_(mean=0,std=base_model.config.initializer_range)
new_wte_gloss.weight.data = new_wgts_gloss
contextualized_model.resize_token_embeddings(len(loaded_tokenizer))
contextualized_model.set_input_embeddings(new_wte_contextual)
contextualized_model.set_output_embeddings(new_wte_contextual)
gloss_based_model.resize_token_embeddings(len(loaded_tokenizer))
gloss_based_model.set_input_embeddings(new_wte_gloss)
gloss_based_model.set_output_embeddings(new_wte_gloss)
```
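As an optional consistency check (a minimal sketch using the variables defined above), tokens shared with the original BERT vocabulary should have kept their original vectors:
```
# Illustrative check: shared tokens keep the original BERT embedding
shared = [w for w in new_vocab if w in old_vocab]
if shared:
    w = shared[0]
    print(w, torch.allclose(new_wgts_contextual[new_vocab[w]], old_wgts[old_vocab[w]]))
```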
### Saving models
The following cell saves the (not yet trained) PictoBERT models.
```
contextualized_model.save_pretrained("./models/pictobert-large-contextual")
gloss_based_model.save_pretrained("./models/pictobert-large-gloss")
```
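To continue working with a saved variant later (a minimal sketch; the paths are the ones used in the cell above), it can be reloaded together with the tokenizer:
```
from transformers import BertForMaskedLM, PreTrainedTokenizerFast

reloaded = BertForMaskedLM.from_pretrained("./models/pictobert-large-contextual")
reloaded_tokenizer = PreTrainedTokenizerFast(tokenizer_file=TOKENIZER_PATH)
reloaded_tokenizer.mask_token = "[MASK]"
print(reloaded.get_input_embeddings().weight.shape)   # (len(loaded_tokenizer), 1024)
```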
# Preprocessing for simulation 3
## Effects at phylum level and order level
@ Aug. 16, Youngwon (youngwon08@gmail.com)
```
import pandas as pd
import numpy as np
from sklearn.utils.extmath import softmax as softmax
from scipy.special import expit as sigmoid
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
%matplotlib inline
path = "./data/simulation/s3"
path_genus = "./data/genus48"
def relu(x):
x[x <= 0.] = 0.
return x
```
## Phylogenetic Tree information
```
## TODO: Should we start with genus48.csv?
phylogenetic_tree_info = pd.read_csv('%s/genus48_dic.csv' % path_genus)
# tree_level_list = ['Genus', 'Family', 'Order', 'Class', 'Phylum']
tree_level_list = phylogenetic_tree_info.columns[:-1].tolist()
phylogenetic_tree_info
print('------------------------------------------------------------------------------------------')
print('Phylogenetic tree level list: %s' % tree_level_list)
print('------------------------------------------------------------------------------------------')
phylogenetic_tree_dict = {'Number':{}}
for tree_lvl in tree_level_list:
lvl_category = phylogenetic_tree_info[tree_lvl].unique()
lvl_num = lvl_category.shape[0]
print('%6s: %d' % (tree_lvl, lvl_num))
phylogenetic_tree_dict[tree_lvl] = dict(zip(lvl_category, np.arange(lvl_num)))
phylogenetic_tree_dict['Number'][tree_lvl]=lvl_num
print('------------------------------------------------------------------------------------------')
print('Phylogenetic_tree_dict info: %s' % list(phylogenetic_tree_dict.keys()))
print('------------------------------------------------------------------------------------------')
phylogenetic_tree = phylogenetic_tree_info.iloc[:,:-1]
for tree_lvl in tree_level_list:
phylogenetic_tree[tree_lvl] = phylogenetic_tree[tree_lvl].map(phylogenetic_tree_dict[tree_lvl])
phylogenetic_tree = np.array(phylogenetic_tree)
phylogenetic_tree
tree_weight_list = []
tree_weight_noise_list = []
num_dict = phylogenetic_tree_dict['Number']
for i in range(len(tree_level_list)-1):
print('Build edge weights between [%6s, %6s]'%(tree_level_list[i],tree_level_list[i+1]))
lower = phylogenetic_tree[:,i]
upper = phylogenetic_tree[:,i+1]
n_lower = num_dict[tree_level_list[i]]
n_upper = num_dict[tree_level_list[i+1]]
tree_w = np.zeros((n_lower,n_upper))
tree_w_n = np.zeros_like(tree_w) + 0.01
for j in range(n_upper):
tree_w[lower[j==upper],j] = 1.
tree_w_n[lower[j==upper],j] = 1.
tree_weight_list.append(tree_w)
tree_weight_noise_list.append(tree_w_n)
```
### Check Error
```
maskw_l1 = np.array(pd.read_csv('%s/mw1.csv' % path))
maskw_l2 = np.array(pd.read_csv('%s/mw2.csv' % path))
maskw_l3 = np.array(pd.read_csv('%s/mw3.csv' % path))
maskw_l4 = np.array(pd.read_csv('%s/mw4.csv' % path))
maskw_list = [maskw_l1,maskw_l2,maskw_l3,maskw_l4]
print([np.sum(np.abs(tw-mw)) for tw, mw in zip(tree_weight_noise_list, maskw_list)])
```
# Multiclass classification with order level and phylum level selections
Aug. 10, 2019
## Experiments with 1000 replications
## Generating X: microbiome abundance data
* Using the data that Jing made, described in detail by Zhai et al. (2018a,b).
* $n \times p$ OTU count matrix
* Aggregating $p_0=2964$ OTUs to $p=48$ genus
* Sample size for each replication: $n = 1000$
* Training $75\%$; test $25\%$
* Phylogenetic tree levels:
1. Genus: $m^0 = 48$
1. Family: $m^1 = 40$
1. Order: $m^2 = 23$
1. Class: $m^3 = 17$
1. Phylum: $m^4 = 9$
## Generating Y: a forward-propagation approach
### Ver 0: same weights and same true connections for each repetition
#### (similar to the original version)
$x^0 = x^{input} \in \mathbb{R}^{n\times p}$ (input genus abundance data)
#### Before the repetitions,
* Select the true connection to disease (based on the phylogenetic tree information)
1. Choose 4 indexes from the order nodes
1. Construct the disease path according to the chosen order nodes.
* Construct the true weights.
1. For $i = 1,2,...,4$, $w^{i} \in \mathbb{R}^{m^{i-1} \times m^{i}}, b^{i} \in \mathbb{R}^{m^{i} \times 1},$
$$w^{i}_{j,k} \sim \begin{cases}
\text{Uniform}(-4.5,3) & \text{associated with the disease} \\
0 & \text{not associated with the disease} \\
0 & \text{not associated with the phylogenetic tree}
\end{cases}$$
$$b^{i}_{k} \sim \mathcal{N}(0,4)$$
1. Classification with the K classes:
- For each k, pick n_k indexes from the phylum nodes to make connection with the disease.
- For the binary classification (K=2), choose 3 indexes for the label = 1.
- Weights: $w^{5} \in \mathbb{R}^{m^4 \times K}, b^{5} \in \mathbb{R}^{K},$
$$w^{5}_{j,k} \sim \begin{cases}
\text{Uniform}(-4.5,3) & \text{associated with the disease} \\
0 & \text{not associated with the disease}
\end{cases}$$
$$b^{5}_{k} \sim \mathcal{N}(-\mathbb{E}(x^{4}), 4)$$
* for normally distributed $x$, $\mathbb{E}[\operatorname{sigmoid}(x)] \simeq \operatorname{sigmoid}\!\left(\frac{\mathbb{E}(x)}{\sqrt{1+\pi \sigma^2/8}}\right)$ (a quick numerical check is sketched below)
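The following cell is a quick numerical check of this approximation (illustrative only, not part of the simulation pipeline):
```
# Monte Carlo check of E[sigmoid(x)] ~= sigmoid(mu / sqrt(1 + pi * sigma^2 / 8)) for x ~ N(mu, sigma^2)
import numpy as np
from scipy.special import expit as sigmoid

rng = np.random.default_rng(0)
mu, sigma = 1.5, 2.0
x = rng.normal(mu, sigma, size=1_000_000)
print(sigmoid(x).mean())                                 # Monte Carlo estimate
print(sigmoid(mu / np.sqrt(1 + np.pi * sigma**2 / 8)))   # closed-form approximation
```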
#### For each repetition,
* For $i = 1,2,...,4$,
1. $h^i = w^{i} x^{i-1} + b^i$
1. $x^{i} = \text{ReLU}(h^i) \in \mathbb{R}^{m^{i}}$
* For the last layer for the classification with the K classes:
1. $x^{5} = w^{5}x^{4}+b^{5} + \epsilon$ where $\epsilon \sim \mathcal{N}(0,1)$
1. For each class k = 1,...,K,
$$y_k = \frac{\exp{x^{5}_k}}{\sum_{j=1}^{K}\exp{x^{5}_j}}$$
```
verbose=False
count_path = 'data/simulation/count/'
x_list = np.array(pd.read_csv('data/simulation/gcount_list.csv', header=None)[0])
np.random.seed(20)
while True:
print('-------------------------------------------------------------------------------')
print('Generation True Connection to disease')
order_idx = np.array([5,4,1,9]) # 5(0) and 4(2) are effect at order level, 1(1) and 9(1) are effects at phylum level
phylum_idx = np.array([1,0,2]) # 1(1, 9) are effect at phylum level, 1(6) and 3(5) are effects at order level
true_tree_idx_list = []
for i in range(5):
idx_order = np.unique(np.concatenate([phylogenetic_tree[:,i][phylogenetic_tree[:,2]==k] for k in order_idx]))
idx_phylum = np.unique(np.concatenate([phylogenetic_tree[:,i][phylogenetic_tree[:,4]==k] for k in phylum_idx]))
idx = np.intersect1d(idx_order, idx_phylum)
print("%6s idx: %s"% (tree_level_list[i], idx))
true_tree_idx_list.append(idx)
print('------------------------------------------------------------------------------------------')
true_tree_weight_list = []
num_dict = phylogenetic_tree_dict['Number']
for i in range(len(tree_level_list)-1):
print('Build true edge weights between [%6s, %6s]'%(tree_level_list[i],tree_level_list[i+1]))
lower = phylogenetic_tree[:,i]
upper = phylogenetic_tree[:,i+1]
n_lower = num_dict[tree_level_list[i]]
n_upper = num_dict[tree_level_list[i+1]]
tree_w = np.zeros((n_lower,n_upper), dtype=np.float32)
for j in true_tree_idx_list[i]:
tree_w[j,upper[lower==j]] = 1.
true_tree_weight_list.append(tree_w)
np.save('%s/ver0/tw_%d.npy'%(path,i+1), np.repeat([tree_w], x_list.shape[0], axis=0))
print('Saved true edge weights between [%6s, %6s] to %s/ver0/tw_%d.npy'%(tree_level_list[i],tree_level_list[i+1],path,i+1))
print('-------------------------------------------------------------------------------')
print('Generation y')
yweight_list = []
ybias_list = []
for i in range(len(true_tree_weight_list)):
yw = np.random.uniform(-4.5,3, true_tree_weight_list[i].shape).astype(np.float32) * true_tree_weight_list[i] # left only the weights on the true connections to the disease
yb = np.random.normal(0,4, true_tree_weight_list[i].shape[-1]).astype(np.float32)
yw = np.repeat([yw], x_list.shape[0], axis=0)
yb = np.repeat([yb], x_list.shape[0], axis=0)
yweight_list.append(yw)
ybias_list.append(yb)
np.save('%s/ver0/solw_%d.npy'%(path,i), yw)
np.save('%s/ver0/solb_%d.npy'%(path,i), yb)
ywc = np.zeros((true_tree_weight_list[3].shape[-1],3), dtype=np.float32)
ywc[:3,0] = np.random.uniform(-4.5,3, 3).astype(np.float32)
ywc[:3,1] = np.random.uniform(-4.5,3, 3).astype(np.float32)
ywc[:3,2] = np.random.uniform(-4.5,3, 3).astype(np.float32)
# ywc[:3,2] = 1. - ywc[:3,0] - ywc[:3,1]
ywc[:,0] = ywc[:,0] * (np.sum(true_tree_weight_list[-1], axis=0) > 0).astype(np.float32)
ywc[:,1] = ywc[:,1] * (np.sum(true_tree_weight_list[-1], axis=0) > 0).astype(np.float32)
ywc[:,2] = ywc[:,2] * (np.sum(true_tree_weight_list[-1], axis=0) > 0).astype(np.float32)
ywc = np.repeat([ywc], x_list.shape[0], axis=0)
ybc = np.random.normal(0,4, ywc.shape[-1]).astype(np.float32)
ybc = np.repeat([ybc], x_list.shape[0], axis=0)
np.save('%s/ver0/solw_%d.npy'%(path,len(true_tree_weight_list)), ywc)
# np.save('%s/ver0/solb_%d.npy'%(path,len(true_tree_weight_list)), ybc)
print('-------------------------------------------------------------------------------')
print('Last activation')
# palette = plt.get_cmap('Set1')
# fig, ax = plt.subplots(nrows =1, ncols=2, figsize=(10,4))
print('-------------------------------------------------------------------------------')
newy_all = []
for fold in range(x_list.shape[0]):
x = pd.read_csv('%s/%s'%(count_path, x_list[fold])) # input x
mat = np.matrix(x)
prepro = MinMaxScaler()
prepro.fit(mat)
x = pd.DataFrame(prepro.transform(mat), columns = list(x.columns))
h = np.array(x, dtype=np.float32)
for i, (yw, yb) in enumerate(zip(yweight_list,ybias_list)):
yw_noise = yw[fold]
# yw_noise += np.random.normal(0,0.0001, true_tree_weight_list[i].shape) \
# *(1.-true_tree_weight_list[i])*(tree_weight_list[i]) # add noise on the tree
h = relu(np.dot(h, yw_noise) + np.repeat([yb[fold]], h.shape[0], axis=0))
h = np.dot(h, ywc[fold])
pp = h + np.repeat([ybc[fold]], h.shape[0], axis=0)
pp += np.random.normal(0,1)
p = softmax(pp) # imbalanced y
order = np.argsort(pp[:,0])
# if fold < 10:
# c = palette(fold)
# ax[0].plot(pp[order,0], p[order,0], marker='', c=c, label=fold)
# ax[0].set_title('Before adjusting')
# ax[0].get_xaxis().set_visible(False)
# if verbose: print("[%d fold] mean (sd): %6.3f (%6.3f)" % (fold, np.mean(pp[:,0]), np.std(pp[:,0])))
ybc[fold] = ybc[fold] - np.mean(h, axis=0) # imbalanced y, adjusted
pp = h + np.repeat([ybc[fold]], h.shape[0], axis=0)
pp += np.random.normal(0,1)
p = softmax(pp)
order = np.argsort(pp[:,0])
# if fold < 10:
# ax[1].plot(pp[order,0], p[order,0], marker='', c=c, label=fold)
# ax[1].set_title('After adjusting')
# ax[1].get_xaxis().set_visible(False)
# if verbose: print("[%d fold] (adjusted) mean (sd): %6.3f (%6.3f)" % (fold, np.mean(pp[:,0]), np.std(pp[:,0])))
# print(p.shape, p[0], np.argmax(p[0]), np.argmax(p, axis=1)[0])
newy = np.argmax(p, axis=1).astype(np.int32)
newy_all.append(newy)
newy_all = pd.DataFrame(np.stack(newy_all).T)
if (min(np.mean(np.sum(newy_all==0, axis=0)), np.mean(np.sum(newy_all==1, axis=0)), np.mean(np.sum(newy_all==2, axis=0))) > 100 and max(np.mean(np.sum(newy_all==0, axis=0)), np.mean(np.sum(newy_all==1, axis=0)), np.mean(np.sum(newy_all==2, axis=0))) < 800):
newy_all.to_csv('%s/ver0/y.csv'%path, index=False)
np.save('%s/ver0/solb_%d.npy'%(path,len(true_tree_weight_list)), ybc)
        # fig.tight_layout()  # only needed when the plotting code above is uncommented (otherwise fig is undefined)
print('-------------------------------------------------------------------------------')
print('Data imbalance: %s' % np.mean(np.sum(newy_all==0, axis=0)), np.mean(np.sum(newy_all==1, axis=0)), np.mean(np.sum(newy_all==2, axis=0)))
break
else:
print('-------------------------------------------------------------------------------')
print('fail')
print('Data imbalance: %s' % np.mean(np.sum(newy_all==0, axis=0)), np.mean(np.sum(newy_all==1, axis=0)), np.mean(np.sum(newy_all==2, axis=0)))
```
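An optional sanity check (the paths follow the generation cell above) to confirm the saved arrays have the expected shapes:
```
import numpy as np
import pandas as pd

tw1 = np.load('%s/ver0/tw_1.npy' % path)     # true Genus-to-Family mask, repeated per replication
y = pd.read_csv('%s/ver0/y.csv' % path)      # generated labels
print(tw1.shape)   # (n_replications, 48, 40)
print(y.shape)     # (n_samples, n_replications)
```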
# Check
```
import os
import json
import numpy as np
import pandas as pd
import copy
import logging
import sys
import matplotlib.pyplot as plt
%matplotlib inline
os.environ['CUDA_VISIBLE_DEVICES']='0'
import configuration
from loss_and_metric import metric_test, metric_texa_test
import readers
import build_network
import configuration
from utils import file_path_fold, plot_history
from keras.utils import to_categorical
logging.basicConfig(format = '[%(name)-8s|%(levelname)s|%(filename)s:%(lineno)s] %(message)s',
level=logging.DEBUG)
log = logging.getLogger()
fold = 0
model_path = 'simulation_s3_v0/simulation_s3_deepbiome_wo_noise_trueinit/'
config_data = configuration.Configurator('%s/config/path_info.cfg' % model_path, log, verbose=False)
config_data.set_config_map(config_data.get_section_map())
config_network = configuration.Configurator('%s/config/network_info.cfg' % model_path, log, verbose=False)
config_network.set_config_map(config_network.get_section_map())
path_info = config_data.get_config_map()
network_info = config_network.get_config_map()
path_info['data_info']['data_path'] = '/'.join(path_info['data_info']['data_path'].split('/')[2:])
path_info['data_info']['tree_info_path'] = '/'.join(path_info['data_info']['tree_info_path'].split('/')[2:])
path_info['data_info']['count_list_path'] = '/'.join(path_info['data_info']['count_list_path'].split('/')[2:])
path_info['data_info']['count_path'] = '/'.join(path_info['data_info']['count_path'].split('/')[2:])
path_info['data_info']['idx_path'] = '/'.join(path_info['data_info']['idx_path'].split('/')[2:])
try: path_info['data_info']['disease_weight_path'] = '/'.join(path_info['data_info']['disease_weight_path'].split('/')[2:])
except: pass
model_weight_path = './%s/%s' % (model_path, path_info['model_info']['weight'])
reader_class = getattr(readers, network_info['model_info']['reader_class'].strip())
reader = reader_class(log, verbose=True)
data_path = path_info['data_info']['data_path']
count_path = path_info['data_info']['count_path']
x_list = np.array(pd.read_csv(path_info['data_info']['count_list_path'], header=None)[0])
y_path = '%s/%s'%(data_path, path_info['data_info']['y_path'])
idxs = np.array(pd.read_csv(path_info['data_info']['idx_path'])-1, dtype=np.int)
reader.read_dataset('%s/%s'%(count_path, x_list[fold]), y_path, fold)
x_train, x_test, y_train, y_test = reader.get_dataset(idxs[:,fold])
network_class = getattr(build_network, network_info['model_info']['network_class'].strip())
network = network_class(network_info, path_info['data_info'], log, fold=0, num_classes=3)
network.model_compile()
phat = network.predict(x_train)
yhat = to_categorical(np.argmax(phat, axis=1), num_classes=3)
kernel_lists = network.get_trained_weight()
bias_lists = network.get_trained_bias()
yweight_list, ybias_list = network.load_true_disease_weight_list(path_info['data_info']['disease_weight_path'])
print(np.sum((y_train - yhat)**2))
for i in range(len(yweight_list)):
print(np.sum((kernel_lists[i] - yweight_list[i])**2), np.sum((bias_lists[i] - ybias_list[i])**2))
```
## Figure
```
# Figure
# (+ effect) dark red: Actinobacteria (phylum level)
# (- effect) dark blue: Clostridiales, Bacteroidales (Order level)
# (0 effect): black
```
<a href="https://colab.research.google.com/github/jemima1992/Exercicio_python_SoulCode/blob/https%2Fgithub.com%2Fjemima1992%2FPanda-COVID-19/While%2CFor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
1) Write a program that prints the numbers from 20 to 50 in ascending order
```
contador = 20
while (contador < 50):
print(contador)
contador += 1
else:
print(contador)
```
2) Write an algorithm that prints the numbers from 200 to 100 in descending order
```
contador = 200
while (contador > 100):
print(contador)
contador-= 1
else:
print(contador)
```
3) Write an algorithm that prints the first 20 even numbers starting from 2
```
contador = 2
while (contador <= 40):  # the 20th even number starting from 2 is 40
if contador % 2 == 0:
print(contador)
contador += 1
contador = 2
while (contador <= 40):
print(contador)
contador += 2
```
# Require the user to enter a valid height between 40 cm and 2.5 m
# Ask the user to enter a natural number different from 0
```
altura = float(input('Digite sua altura'))
while altura < 0.4 or altura > 2.5:
print('Altura invalida digite novamente')
altura = float(input('Digite sua altura'))
else:
print(f'A altura é {altura} valida')
numero = int(input('Digite um numero natural diferente de 0: '))
while numero <= 0:
  numero = int(input('Numero não é natural!Digite o numero novamente: '))
else:
print(f'O numero é {numero} natural')
for contador in range(40, -1, -1):
print(contador)
```
1) Write a program that sums the numbers from 1 to 100 and displays the result
```
soma= 0
for contador in range(1, 101):
soma+=contador
print(soma)
```
2) Write a program that reads the height of 10 people. The program must calculate and show:
a) The shortest height in the group
b) The tallest height in the group
```
for contador in range(10):  # one reading per person, as the exercise asks for 10 people
altura = float(input(f'Digite {contador + 1} sua altura\n'))
  while altura <= 0 or altura > 2.5:
altura = float(input(f'Altura invalida!! Digite {contador + 1} novamente a Altura \n'))
if contador == 0:
maior = altura
menor = altura
else:
if altura > maior:
maior = altura
if altura < menor:
menor = altura
print(f'Maior altura {maior}')
print(f'Menor altura {menor}')
```
3) Write a program that computes the factorial of a number entered by the user.
```
numero = int(input("Fatorial de: ") )
resultado=1
for n in range(1,numero+1):
resultado *= n
print(resultado)
```
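For comparison, the standard library already provides a factorial function, which can be used to check the loop above:
```
import math
numero = int(input("Fatorial de: "))
print(math.factorial(numero))
```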
# A Closer Look at Python List Comprehensions
Inspired by Trey Hunner's blog post (https://treyhunner.com/2015/12/python-list-comprehensions-now-in-color/), I'm taking a bit deeper look into list comprehensions. List comprehensions in Python are definitely not syntactic sugar. They are executed in a very different way than a for loop, and are faster. Stay with me and let's see why.
I ran this notebook on my 21" iMac Retina.
## An Explicit Loop
We start with a typical task in Python - transform an existing list into a new one.
```
def func(item):
return item > 0
old_list = range(1, 10000)
def f1(my_list):
new_list = []
for item in my_list:
if func(item):
new_list.append(item)
return new_list
```
Let's use the dis module to examine what's going on at the byte code level.
```
import dis
dis.dis(f1)
```
Whew! That's a lot of byte codes. The first column is the line number from the code fragment we entered. There's not much documentation on the rest of the output, but it's simple enough to see what's going on.
First, LOAD_CONST loads a constant onto the stack. BUILD_LIST then builds a list, which STORE_FAST stores in a local variable. This is the byte code corresponding to line 3. We next begin with line 9, which creates an empty list.
Line 6 sets up the loop, which ends at bytecode line 48 in our listing, and LOAD_FAST pushes a reference onto the stack. Next, GET_ITER obtains the iterator, and we start the loop. FOR_ITER calls next() on the iterator (in this case over a range object) and pushes the result onto the stack.
There are a few other bookkeeping calls here, but note the LOAD_ATTR byte code. This loads the append() function from the virtual table for the list object. As we shall see below, this is the biggest bottleneck for performance.
The complete list of byte codes for version 3.5 is https://docs.python.org/3.5/library/dis.html. Take a look at this page and take a further look at the byte codes as we go along.
## A Simple Performance Gain
We note above that line 34 loads an attribute - the append method of list - every time the loop is executed. This is the first topic we investigate. First let's load the timeit module and see how it's used.
```
import timeit
```
The [`timeit`](https://docs.python.org/3/library/timeit.html) module disables garbage collection.
Now we make a small change to the function defined above, and time both of them.
```
def f2(my_list):
new_list = []
my_append = new_list.append
for item in my_list:
if func(item):
my_append(item)
return new_list
```
First we time the original loop.
```
timeit.timeit(stmt="f1(old_list)", setup="from __main__ import f1; from __main__ import old_list", number=20000)
```
Next, we time the new incarnation, by loading the attribute before the loop starts.
```
timeit.timeit(stmt="f2(old_list)", setup="from __main__ import f2; from __main__ import old_list", number=20000)
```
Caching the method lookup saves about six seconds.
We should note here that timeit turns off garbage collection while running the statement. This eliminates a lot of ambiguity in the timings. There's also no caching and the setup code is run once.
Additionally, here are the actual byte codes, for your amusement and enlightenment.
```
dis.dis(f2)
```
We see from line 40 that we're doing a simple local-variable load (LOAD_FAST) instead of an attribute lookup.
## The Next Step - List Comprehension
As we stated before, a list comprehension is not simply syntactic sugar, but is used very differently at the byte code level. Let's see why.
```
def f3(my_list):
return [item for item in my_list if func(item)]
timeit.timeit(stmt="f3(old_list)", setup="from __main__ import f3; from __main__ import old_list", number=20000)
```
We picked up a bit of time here, but not as much as expected. However, the gain over the original function is quite significant. But let's see those byte codes.
```
dis.dis(f3)
```
There's much less code here, but you can see the CALL_FUNCTION is used, and there's no loop iterator set up. The loop is performed at the C level, not in the byte codes.
At this point, we can definitively say that when optimizing for performance, both a list comprehension and caching method lookups result in significant gains. You can imagine that these gains are important when doing anything with large datasets.
## Conclusion
1. Prefer list comprehensions for speed and Pythonic code.
2. If you can't write a loop as a list comprehension, cache methods to avoid lookups in the vtable.
3. You can use a list comprehension to loop over two lists (see the sketch below).
4. The same principles apply for dictionary and set comprehensions.
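As a brief illustration of points 3 and 4 (a minimal sketch, separate from the benchmark above), a comprehension can iterate over two lists at once, and the same syntax works for dictionaries and sets:
```
xs = [1, 2, 3]
ys = [10, 20, 30]

pairs = [(x, y) for x, y in zip(xs, ys)]     # loop over two lists in lockstep
products = [x * y for x in xs for y in ys]   # or take the cartesian product
squares = {x: x * x for x in xs}             # dictionary comprehension
evens = {x for x in xs if x % 2 == 0}        # set comprehension
print(pairs, products, squares, evens)
```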
<a href="https://colab.research.google.com/github/PradyumnaKrishna/Colab-Hacks/blob/master/Colab%20RDP/Colab%20RDP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Colab RDP** : Remote Desktop to Colab Instance
> **Warning : Not for Cryptocurrency Mining<br></br>**
>**Why are hardware resources such as T4 GPUs not available to me?** The best available hardware is prioritized for users who use Colaboratory interactively rather than for long-running computations. Users who use Colaboratory for long-running computations may be temporarily restricted in the type of hardware made available to them, and/or the duration that the hardware can be used for. We encourage users with high computational needs to use Colaboratory’s UI with a local runtime. Please note that using Colaboratory for cryptocurrency mining is disallowed entirely, and may result in being banned from using Colab altogether.
Google Colab can give you an instance with 12 GB of RAM and a GPU for up to 12 hours (max.) for free users. Anyone can use it to perform heavy tasks.
To use other similar notebooks, see my repository **[Colab Hacks](https://github.com/PradyumnaKrishna/Colab-Hacks)**
```
#@title **Create User**
#@markdown Enter Username and Password
import os
username = "user" #@param {type:"string"}
password = "root" #@param {type:"string"}
print("Creating User and Setting it up")
# Creation of user
os.system(f"useradd -m {username}")
# Add user to sudo group
os.system(f"adduser {username} sudo")
# Set password of user to 'root'
os.system(f"echo '{username}:{password}' | sudo chpasswd")
# Change default shell from sh to bash
os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd")
print(f"User created and configured having username `{username}` and password `{password}`")
#@title **RDP**
#@markdown It takes 4-5 minutes for installation
import os
import subprocess
#@markdown Visit http://remotedesktop.google.com/headless and copy the command after Authentication
CRP = "" #@param {type:"string"}
#@markdown Enter a Pin (more or equal to 6 digits)
Pin = 123456 #@param {type: "integer"}
#@markdown Autostart Notebook in RDP
Autostart = False #@param {type: "boolean"}
class CRD:
def __init__(self, user):
os.system("apt update")
self.installCRD()
self.installDesktopEnvironment()
self.installGoogleChorme()
self.finish(user)
print("\nRDP created succesfully move to https://remotedesktop.google.com/access")
@staticmethod
def installCRD():
print("Installing Chrome Remote Desktop")
subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def installDesktopEnvironment():
print("Installing Desktop Environment")
os.system("export DEBIAN_FRONTEND=noninteractive")
os.system("apt install --assume-yes xfce4 desktop-base xfce4-terminal")
os.system("bash -c 'echo \"exec /etc/X11/Xsession /usr/bin/xfce4-session\" > /etc/chrome-remote-desktop-session'")
os.system("apt remove --assume-yes gnome-terminal")
os.system("apt install --assume-yes xscreensaver")
os.system("systemctl disable lightdm.service")
@staticmethod
def installGoogleChorme():
print("Installing Google Chrome")
subprocess.run(["wget", "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(["dpkg", "--install", "google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def finish(user):
print("Finalizing")
if Autostart:
os.makedirs(f"/home/{user}/.config/autostart", exist_ok=True)
link = "https://colab.research.google.com/github/PradyumnaKrishna/Colab-Hacks/blob/master/Colab%20RDP/Colab%20RDP.ipynb"
colab_autostart = """[Desktop Entry]
Type=Application
Name=Colab
Exec=sh -c "sensible-browser {}"
Icon=
Comment=Open a predefined notebook at session signin.
X-GNOME-Autostart-enabled=true""".format(link)
with open(f"/home/{user}/.config/autostart/colab.desktop", "w") as f:
f.write(colab_autostart)
os.system(f"chmod +x /home/{user}/.config/autostart/colab.desktop")
os.system(f"chown {user}:{user} /home/{user}/.config")
os.system(f"adduser {user} chrome-remote-desktop")
command = f"{CRP} --pin={Pin}"
os.system(f"su - {user} -c '{command}'")
os.system("service chrome-remote-desktop start")
print("Finished Succesfully")
try:
if CRP == "":
print("Please enter authcode from the given link")
elif len(str(Pin)) < 6:
print("Enter a pin more or equal to 6 digits")
else:
CRD(username)
except NameError as e:
print("'username' variable not found, Create a user first")
%env USE_AUTH_EPHEM=0
#@title **Google Drive Mount**
#@markdown Google Drive can be used as persistent storage for files.<br>
#@markdown **Choose a method (GDFuse Recommended)**
mount_method = "GDFuse" #@param ["GDFuse", "Native"]
#@markdown **Options for GDFuse** <br>
#@markdown - Visit https://github.com/astrada/google-drive-ocamlfuse/wiki/Team-Drives
label = "default" #@param {type:"string"}
mount_team_drive = False #@param {type:"boolean"}
force_mount = False #@param {type:"boolean"}
import os
import subprocess
class Drive():
creds = {}
mountpoint = ""
deps = False
debug = False
def __init__(self, mountpoint="/content/drives", debug=False):
os.makedirs(mountpoint, exist_ok=True)
self.mountpoint = mountpoint
self.debug = debug
def _mount_gdfuse(self, mount_dir):
os.makedirs(mount_dir, exist_ok=True)
subprocess.run(
['google-drive-ocamlfuse',
'-o',
'allow_other',
'-label',
label,
mount_dir,
]
)
print(f"Drive Mounted at {mount_dir}. If you get input/output error, then `team_drive_id` might be wrong or not accessible.")
def _unmount_gdfuse(self, mount_dir):
subprocess.run(
['fusermount',
'-u',
mount_dir,
]
)
os.rmdir(mount_dir)
def auth(self):
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
creds = GoogleCredentials.get_application_default()
self.creds = {
"id": creds.client_id,
"secret": creds.client_secret
}
    def gdfuse(self, label, mount_team_drive=False, force_mount=False):
import getpass
if not self.creds:
self.auth()
if not self.deps:
print("Installing google-drive-ocamlfuse")
            subprocess.run(['apt', 'install', '-y', 'software-properties-common', 'python-software-properties', 'module-init-tools'])
subprocess.run(['add-apt-repository', 'ppa:alessandro-strada/ppa', '-y'])
subprocess.run(['apt', 'update'])
subprocess.run(['apt', 'install', '--assume-yes', 'google-drive-ocamlfuse'])
self.deps = True
base_dir = '/root/.gdfuse'
config_dir = f'{base_dir}/{label}'
mount_dir = f"{self.mountpoint}/{label}"
if force_mount and os.path.exists(mount_dir):
self._unmount_gdfuse(mount_dir)
elif os.path.exists(mount_dir):
print("Drive already mounted")
return
if not os.path.exists(config_dir) or force_mount:
print(f"Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id={self.creds['id']}&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force")
vcode = getpass.getpass("Enter the Auth Code: ")
subprocess.run(
['google-drive-ocamlfuse',
'-headless',
'-id',
self.creds['id'],
'-secret',
self.creds['secret'],
'-label',
label,
],
text=True,
input=vcode
)
if mount_team_drive:
team_drive_id = input("Enter Team Drive ID: ")
subprocess.run(
['sed',
'-i',
f's/team_drive_id=.*$/team_drive_id={team_drive_id}/g',
f'{config_dir}/config'
]
)
else:
subprocess.run(
['sed',
'-i',
f's/team_drive_id=.*$/team_drive_id=/g',
f'{config_dir}/config'
]
)
self._mount_gdfuse(mount_dir)
def native(self):
from google.colab import drive
mount_dir = f"{self.mountpoint}/Native"
drive.mount(mount_dir)
if 'drive' not in globals():
try:
drive = Drive(f"/home/{username}/drives")
except NameError:
drive = Drive('/content/drives')
if mount_method == "Native":
drive.native()
if mount_method == "GDFuse":
drive.gdfuse(label, mount_team_drive, force_mount)
#@title **SSH**
! pip install colab_ssh --upgrade &> /dev/null
#@markdown Choose a method (Agro Recommended)
ssh_method = "Agro" #@param ["Agro", "Ngrok"]
#@markdown Copy authtoken from https://dashboard.ngrok.com/auth (only for ngrok)
ngrokRegion = "us" #@param ["us", "eu", "ap", "au", "sa", "jp", "in"]
def runAgro():
from colab_ssh import launch_ssh_cloudflared
launch_ssh_cloudflared(password=password)
def runNgrok():
from colab_ssh import launch_ssh
from IPython.display import clear_output
import getpass
ngrokToken = getpass.getpass("Enter the ngrokToken: ")
launch_ssh(ngrokToken, password, region=ngrokRegion)
clear_output()
print("ssh", user, end='@')
! curl -s http://localhost:4040/api/tunnels | python3 -c \
"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'][6:].replace(':', ' -p '))"
try:
user = username
password = password
except NameError:
print("No user found, using username and password as 'root'")
user='root'
password='root'
if ssh_method == "Agro":
runAgro()
if ssh_method == "Ngrok":
runNgrok()
#@title **Colab Shutdown**
#@markdown To Kill NGROK Tunnel
NGROK = False #@param {type:'boolean'}
#@markdown To Sleep Colab
Sleep = True #@param {type:'boolean'}
if NGROK:
! killall ngrok
if Sleep:
from time import sleep
sleep(43200)
```
|
github_jupyter
|
#@title **Create User**
#@markdown Enter Username and Password
import os
username = "user" #@param {type:"string"}
password = "root" #@param {type:"string"}
print("Creating User and Setting it up")
# Creation of user
os.system(f"useradd -m {username}")
# Add user to sudo group
os.system(f"adduser {username} sudo")
# Set password of user to 'root'
os.system(f"echo '{username}:{password}' | sudo chpasswd")
# Change default shell from sh to bash
os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd")
print(f"User created and configured having username `{username}` and password `{password}`")
#@title **RDP**
#@markdown It takes 4-5 minutes for installation
import os
import subprocess
#@markdown Visit http://remotedesktop.google.com/headless and copy the command after Authentication
CRP = "" #@param {type:"string"}
#@markdown Enter a Pin (more or equal to 6 digits)
Pin = 123456 #@param {type: "integer"}
#@markdown Autostart Notebook in RDP
Autostart = False #@param {type: "boolean"}
class CRD:
def __init__(self, user):
os.system("apt update")
self.installCRD()
self.installDesktopEnvironment()
self.installGoogleChorme()
self.finish(user)
print("\nRDP created succesfully move to https://remotedesktop.google.com/access")
@staticmethod
def installCRD():
print("Installing Chrome Remote Desktop")
subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def installDesktopEnvironment():
print("Installing Desktop Environment")
os.system("export DEBIAN_FRONTEND=noninteractive")
os.system("apt install --assume-yes xfce4 desktop-base xfce4-terminal")
os.system("bash -c 'echo \"exec /etc/X11/Xsession /usr/bin/xfce4-session\" > /etc/chrome-remote-desktop-session'")
os.system("apt remove --assume-yes gnome-terminal")
os.system("apt install --assume-yes xscreensaver")
os.system("systemctl disable lightdm.service")
@staticmethod
def installGoogleChorme():
print("Installing Google Chrome")
subprocess.run(["wget", "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(["dpkg", "--install", "google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE)
subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE)
@staticmethod
def finish(user):
print("Finalizing")
if Autostart:
os.makedirs(f"/home/{user}/.config/autostart", exist_ok=True)
link = "https://colab.research.google.com/github/PradyumnaKrishna/Colab-Hacks/blob/master/Colab%20RDP/Colab%20RDP.ipynb"
colab_autostart = """[Desktop Entry]
Type=Application
Name=Colab
Exec=sh -c "sensible-browser {}"
Icon=
Comment=Open a predefined notebook at session signin.
X-GNOME-Autostart-enabled=true""".format(link)
with open(f"/home/{user}/.config/autostart/colab.desktop", "w") as f:
f.write(colab_autostart)
os.system(f"chmod +x /home/{user}/.config/autostart/colab.desktop")
os.system(f"chown {user}:{user} /home/{user}/.config")
os.system(f"adduser {user} chrome-remote-desktop")
command = f"{CRP} --pin={Pin}"
os.system(f"su - {user} -c '{command}'")
os.system("service chrome-remote-desktop start")
print("Finished Succesfully")
try:
if CRP == "":
print("Please enter authcode from the given link")
elif len(str(Pin)) < 6:
print("Enter a pin more or equal to 6 digits")
else:
CRD(username)
except NameError as e:
print("'username' variable not found, Create a user first")
%env USE_AUTH_EPHEM=0
#@title **Google Drive Mount**
#@markdown Google Drive can be used as Persistance HDD for files.<br>
#@markdown **Choose a method (GDFuse Recommended)**
mount_method = "GDFuse" #@param ["GDFuse", "Native"]
#@markdown **Options for GDFuse** <br>
#@markdown - Visit https://github.com/astrada/google-drive-ocamlfuse/wiki/Team-Drives
label = "default" #@param {type:"string"}
mount_team_drive = False #@param {type:"boolean"}
force_mount = False #@param {type:"boolean"}
import os
import subprocess
class Drive():
creds = {}
mountpoint = ""
deps = False
debug = False
def __init__(self, mountpoint="/content/drives", debug=False):
os.makedirs(mountpoint, exist_ok=True)
self.mountpoint = mountpoint
self.debug = debug
def _mount_gdfuse(self, mount_dir):
os.makedirs(mount_dir, exist_ok=True)
subprocess.run(
['google-drive-ocamlfuse',
'-o',
'allow_other',
'-label',
label,
mount_dir,
]
)
print(f"Drive Mounted at {mount_dir}. If you get input/output error, then `team_drive_id` might be wrong or not accessible.")
def _unmount_gdfuse(self, mount_dir):
subprocess.run(
['fusermount',
'-u',
mount_dir,
]
)
os.rmdir(mount_dir)
def auth(self):
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
creds = GoogleCredentials.get_application_default()
self.creds = {
"id": creds.client_id,
"secret": creds.client_secret
}
def gdfuse(self, label, mound_team_drive=False, force_mount=False):
import getpass
if not self.creds:
self.auth()
if not self.deps:
print("Installing google-drive-ocamlfuse")
subprocess.run(['apt', 'install', 'software-properties-common python-software-properties module-init-tools', '-y'])
subprocess.run(['add-apt-repository', 'ppa:alessandro-strada/ppa', '-y'])
subprocess.run(['apt', 'update'])
subprocess.run(['apt', 'install', '--assume-yes', 'google-drive-ocamlfuse'])
self.deps = True
base_dir = '/root/.gdfuse'
config_dir = f'{base_dir}/{label}'
mount_dir = f"{self.mountpoint}/{label}"
if force_mount and os.path.exists(mount_dir):
self._unmount_gdfuse(mount_dir)
elif os.path.exists(mount_dir):
print("Drive already mounted")
return
if not os.path.exists(config_dir) or force_mount:
print(f"Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id={self.creds['id']}&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force")
vcode = getpass.getpass("Enter the Auth Code: ")
subprocess.run(
['google-drive-ocamlfuse',
'-headless',
'-id',
self.creds['id'],
'-secret',
self.creds['secret'],
'-label',
label,
],
text=True,
input=vcode
)
if mount_team_drive:
team_drive_id = input("Enter Team Drive ID: ")
subprocess.run(
['sed',
'-i',
f's/team_drive_id=.*$/team_drive_id={team_drive_id}/g',
f'{config_dir}/config'
]
)
else:
subprocess.run(
['sed',
'-i',
f's/team_drive_id=.*$/team_drive_id=/g',
f'{config_dir}/config'
]
)
self._mount_gdfuse(mount_dir)
def native(self):
from google.colab import drive
mount_dir = f"{self.mountpoint}/Native"
drive.mount(mount_dir)
if 'drive' not in globals():
try:
drive = Drive(f"/home/{username}/drives")
except NameError:
drive = Drive('/content/drives')
if mount_method == "Native":
drive.native()
if mount_method == "GDFuse":
drive.gdfuse(label, mount_team_drive, force_mount)
#@title **SSH**
! pip install colab_ssh --upgrade &> /dev/null
#@markdown Choose a method (Agro Recommended)
ssh_method = "Agro" #@param ["Agro", "Ngrok"]
#@markdown Copy authtoken from https://dashboard.ngrok.com/auth (only for ngrok)
ngrokRegion = "us" #@param ["us", "eu", "ap", "au", "sa", "jp", "in"]
def runAgro():
from colab_ssh import launch_ssh_cloudflared
launch_ssh_cloudflared(password=password)
def runNgrok():
from colab_ssh import launch_ssh
from IPython.display import clear_output
import getpass
ngrokToken = getpass.getpass("Enter the ngrokToken: ")
launch_ssh(ngrokToken, password, region=ngrokRegion)
clear_output()
print("ssh", user, end='@')
! curl -s http://localhost:4040/api/tunnels | python3 -c \
"import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'][6:].replace(':', ' -p '))"
try:
user = username
password = password
except NameError:
print("No user found, using username and password as 'root'")
user='root'
password='root'
if ssh_method == "Agro":
runAgro()
if ssh_method == "Ngrok":
runNgrok()
#@title **Colab Shutdown**
#@markdown To Kill NGROK Tunnel
NGROK = False #@param {type:'boolean'}
#@markdown To Sleep Colab
Sleep = True #@param {type:'boolean'}
if NGROK:
! killall ngrok
if Sleep:
from time import sleep
sleep(43200)
# UN/EDIFACT Message Reader
## Overview
### Requirements
- Python 3.6 or higher
### Features
- All encodings
- Version independent
- XML
### Functions
- parse_edi(data: bytes) -> list
- make_edi(segments: list) -> bytes
- parse_xml(root: ElementTree.Element) -> list
- make_xml(segments: list) -> ElementTree.Element
- pretty_xml(root: ElementTree.Element) -> str
### Experimental
- report(segments: list) -> str
- make_edi_xml(segments: list, sd: dict, ed: dict) -> ElementTree.Element
## EDI
Specifications can be found here http://www.unece.org
### Example message
Reading a binary edifact file
```
edi = open('order.edi', 'rb').read()
edi[:80] # first 80 bytes
```
The decoding of an edifact message depends on the optional __UNB__-Segment.
In this example __UNOY__ indicates __UTF-8__-encoding.
```
from edixml import ENCODINGS
ENCODINGS['UNOY']
edi.decode('utf8')[:80]
```
### Message-Syntax
The __UNB-Segment__ indicates the encoding and __Syntax-Version__ for the __Service-Segments__.
The type of the message is indicated in the __UNH-Segment__. <br>
In this example the version of the message is __D18A__ (Year 2018, 1st release) and the type of the message is __ORDERS__.
The __UNA-Segment__ indicates the _special characters_ used for splitting the message into its __Segments__, __Dataelements__ and __Components__:
- Each __Segment__ is identified by its three character __Segment Qualifier__ (UNA, UNB, UNH...) and ends by its __Segment-Terminator__ (')
- Each __Segment__ has __Dataelements__, separated by its __Dataelement-Separator__ (+)
- Each __Dataelement__ has __Components__, separated by its __Component-Separator__ (:)
- The __Decimal-Point-Character__ (.) defines the representation of __Numeric-Values__ (a minimal splitting sketch follows below).
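As a rough illustration of these splitting rules, the sketch below takes a hand-written segment apart with plain string operations (the segment is shaped like the LIN example further down). It ignores the release character, which `parse_edi` handles properly.
```
# Illustration only: split one hypothetical segment using the default UNA
# separators ("+" for data elements, ":" for components, "'" as terminator).
raw_segment = "LIN+1+1+0764569104:IB'"
body = raw_segment.rstrip("'")              # drop the segment terminator
qualifier, *data = body.split('+')          # first element is the segment qualifier
components = [element.split(':') for element in data]
print(qualifier, components)                # LIN [['1'], ['1'], ['0764569104', 'IB']]
```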
```
from edixml import parse_edi
segments = parse_edi(edi)
segments
```
### Indexing
```
segments[7]
['LIN', [['1'], ['1'], ['0764569104', 'IB']]]
seg, elements = segments[7]
seg
elements
```
### Semantics - Code Table
For __each__ version there are:
- hundreds of message-code-tables
- hundreds of segment-code-tables
- hundreds of element-code-tables with over 10,000 different codes
The _full_ implementation of one message for one version was expected to take half a year.
- LIN - Segment-table: https://service.unece.org/trade/untdid/d18a/trsd/trsdlin.htm
- 7143 - Code-table: https://service.unece.org/trade/untdid/d18a/tred/tred7143.htm
```
isbns = [elements[2][0] for seg, elements in segments
if seg == 'LIN' and elements[2][1] == 'IB']
isbns
```
### Formatting
```
from edixml import make_edi
edmoji = make_edi(segments,
component_separator='✉',
dataelement_separator='☺',
decimal_mark='☣',
release_char='☎',
segment_terminator='❤',
with_newline=True)
print(edmoji.decode('utf8'))
```
## XML
### Mapping to XML
```
from edixml import make_xml
xml = make_xml(segments)
type(xml)
```
### Indexing
```
xml[7].tag
```
### Index-Semantics
```
isbns = [elements[2][0].text for elements in xml
if elements.tag == 'LIN' and elements[2][1].text == 'IB']
isbns
```
### Formatting
```
from xml.etree import ElementTree
ElementTree.tostring(xml, encoding='utf8').decode('utf8')[:100]
from edixml import pretty_xml
print(pretty_xml(xml)[:140])
```
## Mapping EDI/XML
```
from edixml import parse_xml
edi == make_edi(parse_xml(xml))
edi == make_edi(parse_xml(make_xml(parse_edi(edi))))
edi == make_edi(parse_xml(make_xml(parse_edi(edmoji))))
```
## Experimental - D18A with Service-Segments (Version 4, Release 2)
### Messages, Segments and Elements in JSON
```
import json
# The Service-Segments and Service-Elements
v42_sd = json.loads(open('V42-9735-10_service_segments.json').read())
v42_ed = json.loads(open('V42-9735-10_service_codes.json').read())
# The Segments, Elements and Messages
d18a_sd = json.loads(open('d18a_segments.json').read())
d18a_ed = json.loads(open('d18a_codes.json').read())
d18a_md = json.loads(open('d18a_messages.json').read()) # only description
sd = {**v42_sd, **d18a_sd}
ed = {**v42_ed, **d18a_ed}
md = {**d18a_md}
total_codes = sum([len(ed[code]['table']) if 'table' in ed[code] else 0
for code in ed])
print(f"Version: D18A, Messages: {len(md)}, Segments: {len(sd)}, Codes: {total_codes}")
```
### Segment-Definitions
```
sd['LIN']['name'], sd['LIN']['description'], sd['LIN']['table'][0]
```
### Element Definitions
```
ed['7143']['name'], ed['7143']['table']['IB']
```
### Message Definition
```
print(md['ORDERS']['description'][:1200], '...')
```
### Reporting
Helper Function to quickly translate an arbitrary message.
```
from edixml import report
print(report(segments, sd, ed))
```
### Descriptive EDI-XML
```
from edixml import make_edi_xml
edifact_xml = make_edi_xml(segments, sd, ed)
print(pretty_xml(edifact_xml)[:300], '...')
```
### Mapping
```
edi == make_edi(parse_xml(edifact_xml))
```
# Geothermics Exercises
These exercises are inspired by [Turcotte and Schubert *Geodynamics*](https://www.cambridge.org/de/academic/subjects/earth-and-environmental-science/structural-geology-tectonics-and-geodynamics/geodynamics-3rd-edition?format=PB&isbn=9780521186230) and [Beardsmore *Crustal Heat Flow*](https://www.cambridge.org/core/books/crustal-heat-%20flow/90C25BAC9C22E55B59D1261CFB49946C).
1. [Increase in oceanic lithosphere thickness with time](#litho)
2. [Solidification of a magma lake](#solid)
<a id="litho"></a>
## 1. Cooling of a Semi-Infinite Half-Space
In the lecture, we briefly talked about Lord Kelvin's approach of assessing the age of the Earth by assuming the solution of a cooling semi-infinite half-space. Basically, he assumed that the specific heat flow at the surface is the result of a cooling process of the Earth. In the beginning, he assumed, the Earth had the same temperature throughout as is still present at its core.
As we saw, his approach of using the diffusion equation is flawed, because he did not consider (or did not know about) concepts like radiogenic heat generation and solid-state thermal convection in the mantle.
$$ \frac{\partial T}{\partial t} = \kappa \frac{\partial^2 T}{\partial x^2} $$
Nonetheless, this equation can also be used for determining the thermal structure of oceanic lithosphere. At a MOR (Mid Ocean Ridge), new hot crust is exposed to cold sea water. With time and seafloor spreading, rocks near the interface between seafloor and water cool down and lose their heat to the water.
In a semi-infinite half-space defined by y > 0, we can obtain the solution to this scenario by solving the diffusion equation above. At t=0, the lithosphere (half-space) has its original temperature $T_0$. Now, at the interface to the water (the *surface*), the temperature changes to a lower temperature $T_1$ at times $t > 0$. That causes a cooling from above, i.e. heat flowing upwards towards the surface.

One can change the diffusion equation above for including the different temperatures by introducing a dimensionless time $\Theta$ using the concept of *similarity*.
$$\Theta = \frac{T - T_0}{T_1 - T_0} $$
$$ \frac{\partial \Theta}{\partial t} = \kappa \frac{\partial^2 \Theta}{\partial x^2} $$
With the conditions $\Theta(x,0) = 0$, $\Theta(0,t) = 1$, $\Theta(\infty,t) = 0$.
As stated above, the half-space solution can be used to model the cooling of oceanic lithosphere. The bottom of said lithosphere, which is moving horizontally with a velocity $v$ above the mantle, can be seen as an isotherm. So, the lithosphere is a package, moving relative to the mantle, bounded by the surface (seafloor) and an isotherm (around 1600 K). The lithosphere thickens over time, so the isothermal boundary at its bottom will be deeper the older the lithosphere is. Due to the seafloor spreading at a MOR, age is also a function of velocity. With a constant spreading velocity, the lithosphere at a distance $x$ from the MOR has an age of $t = x\,v^{-1}$.
The cooling / thickening of the lithosphere can be described by an equation similar to the one in Kelvin's exercise:
$$ t = x v^{-1} $$
$$\Theta = erf\bigg(\frac{y}{2\sqrt{\kappa x v^{-1}}}\bigg)$$
<div class="alert alert-info"> **Task:**
Re-arrange the equation above to come up with a formulation of `y` (the depth, thickness of the oceanic lithosphere).
Use the obtained equation to calculate and plot the age dependent thickness of oceanic lithosphere (so `y`) for the following parameters (i.e. plot isotherms):
T_0 = 277.15 K
T_1 = 1600 K
T = 600 K - 1400 K in steps of 200 K
$\kappa$ = 1.2 mm² s$^{-1}$
t = 0 Myr to 150 Myr
</div>
The error function `erf` and its inverse `erfinv` are available in `scipy.special`.
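For reference, applying the inverse error function to the relation above (with $t = x\,v^{-1}$) gives the depth of an isotherm directly; this is exactly what the solution cell below evaluates:
$$ y = 2\,erf^{-1}(\Theta)\,\sqrt{\kappa t} $$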
```
# used libraries
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from scipy.special import erf, erfinv
# define the parameters
# solve the re-arranged equation
# plot the result
# solution
T0 = 277.15
T1 = 1600
T = np.linspace(600, 1400, 5)
t = np.linspace(0.01, 150, 151)
k = 1.2e-6
y2s = 3600*24*365.25
y = erfinv((T[0]-T0)/(T1-T0))*2*np.sqrt(k*t*y2s*1e6)
for i in T[1:]:
yt = erfinv((i-T0)/(T1-T0))*2*np.sqrt(k*t*y2s*1e6)
y = np.vstack([y,yt])
fig = plt.figure(figsize=[10,6])
for i in range(len(y[:,0])):
plt.plot(t, y[i,:]/1000, label='T = {} K'.format(T[i]))
plt.gca().invert_yaxis()
plt.legend()
plt.xlabel('t (Myr)')
plt.ylabel('depth (km)')
```
<a id='solid'></a>
## 2. Cooling and Solidification of a lava lake
Assume a lava lake on the infant Moon, which formed by a collision of a Mars-sized body with the proto-Earth. Assume that the Moon was covered by a lava sea of about 50 km depth. Its initial temperature was 1500 K, with a surface temperature of 500 K.
A solidification boundary $y_m$ can be determined by
$$ y_m = 2 \xi \sqrt{\kappa t} $$
To obtain $\xi$, we have to iteratively solve
$$\frac{L\sqrt{\pi}}{c(T_m - T_0)} = \frac{e^{-\xi^2}}{\xi\, erf\,\xi} $$
<div class="alert alert-info">**Task:**
Take the following values and calculate the left side of the second equation (this constant is what we will match against to obtain $\xi$): </div>
* L = 320 kJ kg$^{-1}$
* $\kappa$ = 1e-6 m² s$^{-1}$
* c = 1 kJ kg$^{-1}$ K$^{-1}$
* T$_0$ = 500 K
* T$_m$ = 1500 K
```
# set up values and calculate left side of the equation
# solution
L = 320
k = 1e-6
c = 1
T0 = 500
Tm = 1500
# calc leftside
lefts = (L * np.sqrt(np.pi))/(c*(Tm-T0))
print("left side of the equation is {:0.4f}".format(lefts))
```
<div class="alert alert-info">**Task:**
Once you have obtained a value for the left side, iteratively solve the right side of the equation with different $\xi$. Find the value for $\xi$ which yields a result equal to the previous left side of the equation (an accuracy of 4 decimals is enough). </div>
```
# lambdas
xi = np.linspace(0.01, 1.6, 10000)
# calc rightside for the different lambdas
# solution
lam = np.linspace(0.01, 1.6, 10000)
# calc rightside
rights = np.exp(-(lam**2))/(lam*erf(lam))
# plot the right side of the equation vs. the different lambdas
# solution
fig = plt.figure(figsize=[10,6])
plt.plot(lam, rights)
plt.axhline(y=lefts,xmin=0,xmax=1.6, color='red')
plt.ylim([0,2])
plt.xlim([0.4, 1.6])
plt.xlabel('$\lambda$')
plt.ylabel('$ e^{-\lambda^2}/ (\lambda erf \lambda)$');
# find the value for lambda which is approximately equal to the left side of the equation
# solution
idx = np.where(np.round(lefts,4) == np.round(rights,4))
print("For the right side, the value {:.4f} is obtained by a lambda value of {:.4f}".format(*(rights[idx[0][0]],
lam[idx[0][0]])))
```
<div class="alert alert-info">**Task:**
Assuming a depth $y_m$ of 50 km for the lake, and we want to know the duration for the lake to fully solidify, we have to take the first equation with the obtained value for $\xi$ and re-arrange for t.
$$ y_m = 2 \xi \sqrt{\kappa t} $$
Once you come up with a formulation for t, plug in the numbers and calculate the time the lava lake would need to solidify. </div>
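For reference, squaring $y_m = 2 \xi \sqrt{\kappa t}$ and solving for $t$ gives
$$ t = \frac{y_m^2}{4\,\xi^2\,\kappa} $$
which is the expression evaluated in the solution below.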
```
# calculate the time
# solution
t = 50000**2/(4*lam[idx[0][0]]**2*k)/y2s/1e6
print("It takes {:.2f} Million years for the magma lake to solidify.".format(t))
```
# 1. Geometric Sum
Given k, find the geometric sum i.e.
1 + 1/2 + 1/4 + 1/8 + ... + 1/(2^k)
using recursion.
Input format :
Integer k
Output format :
Geometric sum (upto 5 decimal places)
Constraints :
0 <= k <= 1000
Sample Input 1 :
3
Sample Output 1 :
1.87500
Sample Input 2 :
4
Sample Output 2 :
1.93750
```
def geometricSum(n):
if n==0:
return 1
else:
return (1/pow(2,n)) + geometricSum(n-1)
n = int(input())
print('%.5f'%geometricSum(n))
```
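Note that this solution recurses once per term, so for k close to the upper constraint of 1000 it can hit CPython's default recursion limit of 1000 frames (the same caveat applies to later solutions that recurse once per character or step, e.g. problems 2 and 5). A minimal workaround, assuming the `geometricSum` defined above:
```
import sys
sys.setrecursionlimit(3000)  # headroom for ~1000 recursive calls plus interpreter frames
print('%.5f' % geometricSum(1000))  # 2.00000
```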
# 2. Check Palindrome (recursive)
Check whether a given String S is a palindrome using recursion. Return true or false.
Input Format :
String S
Output Format :
'true' or 'false'
Constraints :
0 <= |S| <= 1000
where |S| represents length of string S.
Sample Input 1 :
racecar
Sample Output 1:
true
Sample Input 2 :
ninja
Sample Output 2:
false
```
def isPalRec(st, s, e) :
if (s == e):
return True
if (st[s] != st[e]) :
return False
if (s < e + 1) :
return isPalRec(st, s + 1, e - 1)
return True
def isPalindrome(st) :
n = len(st)
if (n == 0) :
return True
return isPalRec(st, 0, n - 1)
print('true' if isPalindrome(input()) else 'false')
```
# 3. Sum of digits (recursive)
Write a recursive function that returns the sum of the digits of a given integer.
Input format :
Integer N
Output format :
Sum of digits of N
Constraints :
0 <= N <= 10^9
Sample Input 1 :
12345
Sample Output 1 :
15
Sample Input 2 :
9
Sample Output 2 :
9
```
def sumofDigits(n):
if n//10==0:
return n
else:
return (n%10)+sumofDigits(n//10)
print(sumofDigits(int(input())))
```
# 5. Multiplication (Recursive)
Given two integers M & N, calculate and return their multiplication using recursion. You can only use subtraction and addition for your calculation. No other operators are allowed.
Input format :
Line 1 : Integer M
Line 2 : Integer N
Output format :
M x N
Constraints :
0 <= M <= 1000
0 <= N <= 1000
Sample Input 1 :
3
5
Sample Output 1 :
15
Sample Input 2 :
4
0
Sample Output 2 :
0
```
def product( x , y ):
if x < y:
return product(y, x)
elif y != 0:
return (x + product(x, y - 1))
else:
return 0
print(product(int(input()),int(input())))
```
# 6. Count Zeros
Given an integer N, count and return the number of zeros that are present in the given integer using recursion.
Input Format :
Integer N
Output Format :
Number of zeros in N
Constraints :
0 <= N <= 10^9
Sample Input 1 :
0
Sample Output 1 :
1
Sample Input 2 :
00010204
Sample Output 2 :
2
Explanation for Sample Output 2 :
Even though "00010204" has 5 zeros, the output would still be 2 because when you convert it to an integer, it becomes 10204.
Sample Input 3 :
708000
Sample Output 3 :
4
```
def CountZeros(n,c=0):
if n//10==0:
if n==0:
return c+1
else:
return c
else:
if n%10==0:
return CountZeros(n//10,c+1)
else:
return CountZeros(n//10,c)
print(CountZeros(int(input())) )
```
# 7. String to Integer
Write a recursive function to convert a given string into the number it represents. That is, the input will be a numeric string that contains only digits; you need to convert the string into the corresponding integer and return the answer.
Input format :
Numeric string S (string, Eg. "1234")
Output format :
Corresponding integer N (int, Eg. 1234)
Constraints :
0 <= |S| <= 9
where |S| represents length of string S.
Sample Input 1 :
00001231
Sample Output 1 :
1231
Sample Input 2 :
12567
Sample Output 2 :
12567
```
def stringToInteger(str:str):
if (len(str) == 1):
return ord(str[0]) - ord('0')
y = stringToInteger(str[1:])
x = ord(str[0]) - ord('0')
x = x * (10**(len(str) - 1)) + y
return x
print(stringToInteger(input()))
```
# 8. Pair Star
Given a string S, compute recursively a new string where identical chars that are adjacent in the original string are separated from each other by a "*".
Input format :
String S
Output format :
Modified string
Constraints :
0 <= |S| <= 1000
where |S| represents length of string S.
Sample Input 1 :
hello
Sample Output 1:
hel*lo
Sample Input 2 :
aaaa
Sample Output 2 :
a*a*a*a
```
def pairStar(Input, Output, i = 0) :
Output = Output + Input[i]
if (i == len(Input) - 1) :
print(Output)
return
if (Input[i] == Input[i + 1]):
Output = Output + '*'
pairStar(Input, Output, i + 1)
pairStar(input(),'')
```
# 9. Check AB
Suppose you have a string, S, made up of only 'a's and 'b's. Write a recursive function that checks if the string was generated using the following rules:
a. The string begins with an 'a'
b. Each 'a' is followed by nothing or an 'a' or "bb"
c. Each "bb" is followed by nothing or an 'a'
If all the rules are followed by the given string, return true otherwise return false.
Input format :
String S
Output format :
'true' or 'false'
Constraints :
1 <= |S| <= 1000
where |S| represents length of string S.
Sample Input 1 :
abb
Sample Output 1 :
true
Sample Input 2 :
abababa
Sample Output 2 :
false
Explanation for Sample Input 2
In the above example, a is not followed by either "a" or "bb", instead it's followed by "b" which results in false to be returned.
```
def checkAB(str):
if (len(str) == 0):
return True
if (str[0] == 'a'):
if (len(str[1:]) > 1 and str[1:3] == 'bb'):
return checkAB(str[3:])
else:
return checkAB(str[1:])
else:
return False
print('true' if checkAB(input()) else 'false')
```
# 10. Staircase
A child is running up a staircase with N steps, and can hop either 1 step, 2 steps or 3 steps at a time. Implement a method to count how many possible ways the child can run up the stairs. You need to return the number of possible ways W.
Input format :
Integer N
Output Format :
Integer W
Constraints :
1 <= N <= 30
Sample Input 1 :
4
Sample Output 1 :
7
Sample Input 2 :
5
Sample Output 2 :
13
```
def findStep(n):
if ( n == 0 ):
return 1
elif (n < 0):
return 0
else:
return findStep(n - 3) + findStep(n - 2) + findStep(n - 1)
print(findStep(int(input())))
```
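The plain recursion above recomputes the same subproblems many times, so for N close to the upper constraint of 30 it becomes slow. A memoised variant (a sketch using `functools.lru_cache`; not part of the original solution) keeps the same recurrence but evaluates each value only once:
```
from functools import lru_cache

@lru_cache(maxsize=None)
def findStepMemo(n):
    # same recurrence as findStep, but each n is computed only once
    if n == 0:
        return 1
    if n < 0:
        return 0
    return findStepMemo(n - 3) + findStepMemo(n - 2) + findStepMemo(n - 1)

print(findStepMemo(30))  # 53798080
```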
# Introduction - Problem Definition
We will investigate the Boston House Price dataset. Each record in the database describes a Boston suburb or town. The data was drawn from the Boston Standard Metropolitan
Statistical Area (SMSA) in 1970. The attributes are defined as follows (taken from the UCI Machine Learning Repository):
1. CRIM: per capita crime rate by town
2. ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
3. INDUS: proportion of non-retail business acres per town
4. CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
5. NOX: nitric oxides concentration (parts per 10 million)
6. RM: average number of rooms per dwelling
7. AGE: proportion of owner-occupied units built prior to 1940
8. DIS: weighted distances to five Boston employment centers
9. RAD: index of accessibility to radial highways
10. TAX: full-value property-tax rate per USD 10,000
11. PTRATIO: pupil-teacher ratio by town
12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of Black residents by town
13. LSTAT: % lower status of the population
14. MEDV: Median value of owner-occupied homes in USD 1000s
We can see that the input attributes have a mixture of units.
We will perform the following steps:
2. Load the Dataset (Import libraries and load dataset)
3. Analyze Data (Descriptive Statistics)
4. Data Visualizations (Unimodal and Multimodal, Summary of Ideas)
5. Validation Dataset
6. Evaluate Algorithms: Baseline
7. Evaluate Algorithms: Standardization
8. Improve Results with Tuning
9. Ensemble Methods
10. Tune Ensemble Methods
11. Finalize Model
12. Summary
13. References
<u>Goal</u>: Prediction of Boston house prices
source: https://www.kaggle.com/sugamkhetrapal/boston-house-price-prediction-end-to-end-project/code
# 2. Load the Dataset
Let's start off by loading the libraries required for this project.
## 2.1 Import libraries
First, let's import all of the modules, functions and objects we are going to use in this project.
```
# Load libraries
import numpy
from numpy import arange
from matplotlib import pyplot
from pandas import read_csv
from pandas import set_option
from pandas.plotting import scatter_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.metrics import mean_squared_error
import warnings
warnings.filterwarnings('ignore')
```
## 2.2 Load dataset
```
# Load dataset
filename = '../data/Boston_housing_prices.csv'
names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
dataset = read_csv(filename, delim_whitespace=True, names=names)
```
# 3. Analyze Data
We can now take a closer look at our loaded data.
## 3.1 Descriptive Statistics
Let's start off by confirming the dimensions of the dataset, e.g. the number of rows and columns.
```
# shape
print(dataset.shape)
```
<u>Inference</u>: We have 506 instances to work with and can confirm the data has 14 attributes including the output attribute MEDV.
Let's also look at the data types of each attribute.
```
# types
print(dataset.dtypes)
```
<u>Inference</u>: We can see that all of the attributes are numeric, mostly real values (float) and some have been interpreted as integers (int).
Let's now take a peek at the first 5 rows of the data.
```
# head
print(dataset.head(5))
```
<u>Inference</u>: We can confirm that the scales of the attributes are all over the place because of the differing units. We may benefit from some transforms later on.
Let's summarize the distribution of each attribute.
```
set_option('precision', 1)
print(dataset.describe())
```
<u>Inference</u>: We now have a better feeling for how different the attributes are. The min and max values as well as the means vary a lot. We are likely going to get better results by rescaling the data in some way.
Now, let's now take a look at the correlation between all of the numeric attributes.
```
# correlation
set_option('precision', 2)
print(dataset.corr(method='pearson'))
```
<u>Inference</u>: We can see that many of the attributes have a strong correlation (e.g. > 0.70 or < -0.70). For example:
* NOX and INDUS with 0.76.
* DIS and INDUS with -0.71.
* TAX and INDUS with 0.72.
* AGE and NOX with 0.73.
* DIS and NOX with -0.77.
It also looks like LSTAT has a good negative correlation with the output variable MEDV with a value of -0.74.
# 4 Data Visualizations
## 4.1 Unimodal Data Visualizations
Let's look at visualizations of individual attributes. It is often useful to look at our data using multiple different visualizations in order to spark ideas. Let's look at histograms of each attribute to get a sense of the data distributions.
```
# histograms
dataset.hist(sharex=False, sharey=False, xlabelsize=1, ylabelsize=1)
pyplot.show()
```
<u>Inference</u>:
* We can see that some attributes may have an exponential distribution, such as CRIM, ZN, AGE and B.
* We can see that others may have a bimodal distribution such as RAD and TAX.
Let's look at the same distributions using density plots that smooth them out a bit.
```
# density
dataset.plot(kind='density', subplots=True, layout=(4,4), sharex=False, legend=False, fontsize=1)
pyplot.show()
```
<u>Inference</u>:
* This perhaps adds more evidence to our suspicion about possible exponential and bimodal distributions.
* It also looks like NOX, RM and LSTAT may be skewed Gaussian distributions, which might be helpful later with transforms.
Let's look at the data with box and whisker plots of each attribute.
```
# box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(4,4), sharex=False, sharey=False, fontsize=8)
pyplot.show()
```
<u>Inference</u>: This helps point out the skew in many distributions, so much so that some data points look like outliers (i.e. beyond the whiskers of the plots).
## 4.2 Multimodal Data Visualizations
Let's look at some visualizations of the interactions between variables. The best place to start is a scatter plot matrix.
```
# scatter plot matrix
scatter_matrix(dataset)
pyplot.show()
```
<u>Inference</u>: We can see that some of the higher correlated attributes do show good structure in their relationship. Not linear, but nice predictable curved relationships.
Let's also visualize the correlations between the attributes.
```
# correlation matrix
fig = pyplot.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(dataset.corr(), vmin=-1, vmax=1, interpolation='none')
fig.colorbar(cax)
ticks = numpy.arange(0,14,1)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.set_xticklabels(names)
ax.set_yticklabels(names)
pyplot.show()
```
<u>Inference</u>:
* The yellow color shows positive correlation whereas the purple color shows negative correlation.
* We can also see some strongly correlated (yellow) and anti-correlated (purple) attribute pairs that are candidates for removal, which may improve the accuracy of models later on.
## 4.3 Summary of Ideas
There is a lot of structure in this dataset. We need to think about transforms that we could use later to better expose the structure which in turn may improve modeling accuracy. So far it would be worth trying:
* Feature selection and removing the most correlated attributes.
* Normalizing the dataset to reduce the effect of differing scales.
* Standardizing the dataset to reduce the effects of differing distributions.
With lots of additional time we would also explore the possibility of binning (discretization) of the data. This can often improve accuracy for decision tree algorithms.
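As an illustration of what such a binning step could look like (a sketch only, using scikit-learn's `KBinsDiscretizer`; it is not part of the workflow in the rest of this project):
```
# Hypothetical binning sketch: discretize every input attribute into 5 ordinal bins.
from sklearn.preprocessing import KBinsDiscretizer

binner = KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='quantile')
X_binned = binner.fit_transform(dataset.drop('MEDV', axis=1))
print(X_binned[:3])  # each value is now a bin index between 0 and 4
```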
# 5. Validation Dataset
It is a good idea to use a validation hold-out set. This is a sample of the data that we hold back from our analysis and modeling. We use it right at the end of our project to confirm the
accuracy of our final model. It is a smoke test that we can use to see if we messed up and to give us confidence on our estimates of accuracy on unseen data. We will use 80% of the dataset for modeling and hold back 20% for validation.
```
# Split-out validation dataset
array = dataset.values
X = array[:,0:13]
Y = array[:,13]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size, random_state=seed)
```
# 6. Evaluate Algorithms: Baseline
We have no idea what algorithms will do well on this problem. Gut feel suggests regression algorithms like Linear Regression and ElasticNet may do well. It is also possible that decision trees and even SVM may do well. We have no idea. Let's design our test harness. We will use 10-fold cross validation. The dataset is not too small and this is a good standard test harness configuration. We will evaluate algorithms using the Mean Squared Error (MSE) metric. MSE will give a gross idea of how wrong all predictions are (0 is perfect).
```
# Test options and evaluation metric
num_folds = 10
seed = 7
scoring = 'neg_mean_squared_error'
```
Let's create a baseline of performance on this problem and spot-check a number of different algorithms. We will select a suite of different algorithms capable of working on this regression problem. The six algorithms selected include:
* Linear Algorithms: Linear Regression (LR), Lasso Regression (LASSO) and ElasticNet (EN).
* Nonlinear Algorithms: Classification and Regression Trees (CART), Support Vector Regression (SVR) and k-Nearest Neighbors (KNN).
```
# Spot-Check Algorithms
models = []
models.append(('LR', LinearRegression()))
models.append(('LASSO', Lasso()))
models.append(('EN', ElasticNet()))
models.append(('KNN', KNeighborsRegressor()))
models.append(('CART', DecisionTreeRegressor()))
models.append(('SVR', SVR()))
```
The algorithms all use default tuning parameters. Let's compare the algorithms. We will display the mean and standard deviation of MSE for each algorithm as we calculate it and collect the results for use later.
```
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = KFold(n_splits=num_folds, random_state=seed)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
```
<u>Inference</u>:
* It looks like LR (-21) has the lowest MSE, followed closely by CART (-25), LASSO (-26) and EN (-27).
Let's take a look at the distribution of scores across all cross validation folds by algorithm.
```
# Compare Algorithms
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
```
<u>Inference</u>: We can see similar distributions for the regression algorithms and perhaps a tighter distribution of scores for CART.
The differing scales of the data is probably hurting the skill of all of the algorithms and perhaps more so for SVR and KNN. In the next section we will look at running the same algorithms using a standardized copy of the data.
# 7. Evaluate Algorithms: Standardization
We suspect that the differing scales of the raw data may be negatively impacting the skill of some of the algorithms. Let's evaluate the same algorithms with a standardized copy of the dataset. This is where the data is transformed such that each attribute has a mean value of zero and a standard deviation of 1. We also need to avoid data leakage when we transform the data. A good way to avoid leakage is to use pipelines that standardize the data and build the model for each fold in the cross validation test harness. That way we can get a fair estimation of how each model with standardized data might perform on unseen data.
```
# Standardize the dataset
pipelines = []
pipelines.append(('ScaledLR', Pipeline([('Scaler', StandardScaler()),('LR',
LinearRegression())])))
pipelines.append(('ScaledLASSO', Pipeline([('Scaler', StandardScaler()),('LASSO',
Lasso())])))
pipelines.append(('ScaledEN', Pipeline([('Scaler', StandardScaler()),('EN',
ElasticNet())])))
pipelines.append(('ScaledKNN', Pipeline([('Scaler', StandardScaler()),('KNN',
KNeighborsRegressor())])))
pipelines.append(('ScaledCART', Pipeline([('Scaler', StandardScaler()),('CART',
DecisionTreeRegressor())])))
pipelines.append(('ScaledSVR', Pipeline([('Scaler', StandardScaler()),('SVR', SVR())])))
results = []
names = []
for name, model in pipelines:
kfold = KFold(n_splits=num_folds, random_state=seed)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
```
<u>Inference</u>:
* Running the example provides a list of mean squared errors. We can see that scaling did have an effect on KNN, driving the error lower than the other models.
Let's take a look at the distribution of the scores across the cross validation folds.
```
# Compare Algorithms
fig = pyplot.figure()
fig.suptitle('Scaled Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
```
<u>Inference</u>:
* We can see that KNN has both a tight distribution of error and has the lowest score.
# 8. Improve Results With Tuning
We know from the results in the previous section that KNN achieves good results on a scaled version of the dataset. But can it do better? The default number of neighbors in KNN (n_neighbors) is 5. We can use a grid search to try a set of different numbers of neighbors and see if we can improve the score. The below example tries odd k values from 1 to 21, an arbitrary range covering the default and a known good value of 7. Each k value (n_neighbors) is evaluated using 10-fold cross validation on a standardized copy of the training dataset.
```
# KNN Algorithm tuning
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
k_values = numpy.array([1,3,5,7,9,11,13,15,17,19,21])
param_grid = dict(n_neighbors=k_values)
model = KNeighborsRegressor()
kfold = KFold(n_splits=num_folds, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(rescaledX, Y_train)
```
We can display the mean and standard deviation scores as well as the best performing value for k below.
```
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
```
<u>Inference</u>: We can see that the best for k (n_neighbors) is 3 providing a mean squared error of -18.109304, the best so far.
# 9. Ensemble Methods
Another way that we can improve the performance of algorithms on this problem is by using ensemble methods. In this section we will evaluate four different ensemble machine learning
algorithms, two boosting and two bagging methods:
* Boosting Methods: AdaBoost (AB) and Gradient Boosting (GBM).
* Bagging Methods: Random Forests (RF) and Extra Trees (ET).
We will use the same test harness as before, 10-fold cross validation and pipelines that standardize the training data for each fold.
```
# ensembles
ensembles = []
ensembles.append(('ScaledAB', Pipeline([('Scaler', StandardScaler()),('AB',
AdaBoostRegressor())])))
ensembles.append(('ScaledGBM', Pipeline([('Scaler', StandardScaler()),('GBM',
GradientBoostingRegressor())])))
ensembles.append(('ScaledRF', Pipeline([('Scaler', StandardScaler()),('RF',
RandomForestRegressor())])))
ensembles.append(('ScaledET', Pipeline([('Scaler', StandardScaler()),('ET',
ExtraTreesRegressor())])))
results = []
names = []
for name, model in ensembles:
kfold = KFold(n_splits=num_folds, random_state=seed)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
```
Running the above calculates the mean squared error for each method using the default parameters.
<u>Inference</u>: We can see that we're generally getting better scores than our linear and nonlinear algorithms in previous sections.
We can also plot the distribution of scores across the cross validation folds.
```
# Compare Algorithms
fig = pyplot.figure()
fig.suptitle('Scaled Ensemble Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
```
<u>Inference</u>: It looks like Gradient Boosting has a better mean score; it also looks like Extra Trees has a similar distribution and perhaps a better median score.
We can probably do better, given that the ensemble techniques used the default parameters. In the next section we will look at tuning the Gradient Boosting to further lift the performance.
# 10. Tune Ensemble Methods
The default number of boosting stages to perform (n_estimators) is 100. This is a good candidate parameter of Gradient Boosting to tune. Often, the larger the number of boosting stages, the better the performance but the longer the training time. In this section we will look at tuning the number of stages for gradient boosting. Below we define a parameter grid with n_estimators values from 50 to 400 in increments of 50. Each setting is evaluated using 10-fold cross validation.
```
# Tune scaled GBM
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
param_grid = dict(n_estimators=numpy.array([50,100,150,200,250,300,350,400]))
model = GradientBoostingRegressor(random_state=seed)
kfold = KFold(n_splits=num_folds, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(rescaledX, Y_train)
```
As before, we can summarize the best configuration and get an idea of how performance changed with each different configuration.
```
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
print("%f (%f) with: %r" % (mean, stdev, param))
```
We can see that the best configuration was n_estimators=400 resulting in a mean squared error of -9.356471, about 0.65 units better than the untuned method.
Next we can finalize the model and prepare it for general use.
# 11. Finalize Model
In this section we will finalize the gradient boosting model and evaluate it on our hold out validation dataset. First we need to prepare the model and train it on the entire training dataset. This includes standardizing the training dataset before training.
```
# prepare the model
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
model = GradientBoostingRegressor(random_state=seed, n_estimators=400)
model.fit(rescaledX, Y_train)
```
We can then scale the inputs for the validation dataset and generate predictions.
```
# transform the validation dataset
rescaledValidationX = scaler.transform(X_validation)
predictions = model.predict(rescaledValidationX)
print(mean_squared_error(Y_validation, predictions))
```
We can see that the estimated mean squared error on the validation set is about 11.8, in the same ballpark as our cross-validation estimate of -9.3 (the sign differs because `neg_mean_squared_error` reports the negated MSE).
# 12. Summary
We performed the following steps:
* Loading the Dataset.
* Analyze Data (some skewed distributions and correlated attributes).
* Evaluate Algorithms (Linear Regression looked good).
* Evaluate Algorithms with Standardization (KNN looked good).
* Algorithm Tuning (K=3 for KNN was best).
* Ensemble Methods (Bagging and Boosting, Gradient Boosting looked good).
* Tuning Ensemble Methods (getting the most from Gradient Boosting).
* Finalize Model (use all training data and confirm using validation dataset).
# 13. References
* Thank you to Jason Brownlee https://machinelearningmastery.com/
# Neighbourhood Cleaning Rule
The Neighbourhood Cleaning Rule works as follows:
1) Remove noisy observations from the majority class with ENN:
- explores the 3 closest neighbours
- uses majority vote of neighbours to retain observations
2) Remove observations from the majority class if:
- they are 1 of the 3 closest neighbours to a minority sample, and
- most / all of those 3 closest neighbours are not minority, and
- the majority class has at least half as many observations as those in the minority (this can be regulated)
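Step 1 above corresponds on its own to imbalanced-learn's `EditedNearestNeighbours` undersampler. The sketch below (illustration only, on a small toy dataset; not part of the comparison further down) shows that cleaning step in isolation with the same 3-neighbour setting:
```
# ENN on its own: remove majority-class samples whose neighbours disagree with them.
from sklearn.datasets import make_classification
from imblearn.under_sampling import EditedNearestNeighbours

X_toy, y_toy = make_classification(n_samples=1000, n_features=2, n_redundant=0,
                                   n_clusters_per_class=1, weights=[0.9],
                                   random_state=0)
enn = EditedNearestNeighbours(n_neighbors=3, kind_sel='mode')
X_enn, y_enn = enn.fit_resample(X_toy, y_toy)
print(X_toy.shape, X_enn.shape)  # fewer majority-class samples after cleaning
```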
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from imblearn.under_sampling import NeighbourhoodCleaningRule
```
## Create data
We will create data where the classes have different degrees of separateness.
https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html
```
def make_data(sep):
# returns arrays
X, y = make_classification(n_samples=1000,
n_features=2,
n_redundant=0,
n_clusters_per_class=1,
weights=[0.99],
class_sep=sep,# how separate the classes are
random_state=1)
# transform arrays into pandas df and series
X = pd.DataFrame(X, columns =['varA', 'varB'])
y = pd.Series(y)
return X, y
```
## Neighbourhood Cleaning Rule
[NeighbourhoodCleaningRule](https://imbalanced-learn.org/stable/references/generated/imblearn.under_sampling.NeighbourhoodCleaningRule.html)
### Well separated classes
```
# create data
X, y = make_data(sep=2)
# set up Neighbourhood cleaning rule
ncr = NeighbourhoodCleaningRule(
sampling_strategy='auto',# undersamples from all classes except minority
n_neighbors=3, # explores 3 neighbours per observation
kind_sel='all', # all neighbours need to disagree; only applies to the cleaning step
# alternatively, we can set this to 'mode', and then most neighbours
# need to disagree for an observation to be removed.
n_jobs=4, # 4 processors in my laptop
threshold_cleaning=0.5, # the threshold to evaluate a class for cleaning (used only for the cleaning step)
)
# Note, the threshold_cleaning implementation by imbalanced-learn does not
# match what was described in the original article. At the moment, it will
# only clean observations if the class has at least threshold * X.shape[0]
# number of observations
X_resampled, y_resampled = ncr.fit_resample(X, y)
# size of original data
X.shape, y.shape
# size of undersampled data
X_resampled.shape, y_resampled.shape
# plot original data
sns.scatterplot(
data=X, x="varA", y="varB", hue=y
)
plt.title('Original dataset')
plt.show()
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
Note how some yellow dots can now be seen more clearly, as their neighbouring observations from the other class were removed from the data set.
**HOMEWORK**
- play with the parameters of the Neighbourhood Cleaning Rule transformer and examine the size of the returned dataset and the distribution of the samples.
### Partially separated classes
Let's repeat the exercise in data where the classes are not so clearly separated.
```
# create data
X, y = make_data(sep=0)
# set up Neighbourhood cleaning rule
ncr = NeighbourhoodCleaningRule(
sampling_strategy='auto',# undersamples from all classes except minority
n_neighbors=3, # explores 3 neighbours per observation
kind_sel='mode', # most neighbours need to disagree; only applies to the cleaning step
# alternatively, we can set this to 'all', and then all neighbours
# need to disagree for an observation to be removed.
n_jobs=4, # 4 processors in my laptop
threshold_cleaning=0.5, # the threshold to evaluate a class for cleaning (used only for the cleaning step)
)
X_resampled, y_resampled = ncr.fit_resample(X, y)
# original data
X.shape, y.shape
# undersampled data
X_resampled.shape, y_resampled.shape
```
As the classes are not so clearly distinguished, more samples were removed from the dataset.
```
# plot original data
sns.scatterplot(
data=X, x="varA", y="varB", hue=y
)
plt.title('Original dataset')
plt.show()
# plot undersampled data
sns.scatterplot(
data=X_resampled, x="varA", y="varB", hue=y_resampled
)
plt.title('Undersampled dataset')
plt.show()
```
Notice again that observations have been removed from the surroundings of the minority class samples.
## Neighbourhood Cleaning Rule
### Real data - Performance comparison
Does it work well with real datasets?
Well, it depends on the dataset, so we need to compare the model built on the whole dataset with the one built on the undersampled dataset.
```
# load data
# only a few observations to speed up the computation
data = pd.read_csv('../kdd2004.csv').sample(10000, random_state=10)
data.head()
# imbalanced target
data.target.value_counts() / len(data)
# separate dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
data.drop(labels=['target'], axis=1), # drop the target
data['target'], # just the target
test_size=0.3,
random_state=0)
# set up Neighbourhood cleaning rule
ncr = NeighbourhoodCleaningRule(
sampling_strategy='auto',# undersamples from all classes except minority
n_neighbors=3, # explores 3 neighbours per observation
    kind_sel='mode', # most neighbours need to disagree, only applies to the cleaning step
    n_jobs=4, # 4 processors in my laptop
    threshold_cleaning=0.1, # the threshold to evaluate a class for cleaning (used only in the cleaning step)
)
X_ncr, y_ncr = ncr.fit_resample(X_train, y_train)
# compare shapes
X_train.shape, X_ncr.shape
```
## Machine learning performance comparison
Let's compare model performance with and without undersampling.
```
# function to train random forests and evaluate the performance
def run_randomForests(X_train, X_test, y_train, y_test):
rf = RandomForestClassifier(n_estimators=200, random_state=39, max_depth=4)
rf.fit(X_train, y_train)
print('Train set')
pred = rf.predict_proba(X_train)
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_train, pred[:,1])))
print('Test set')
pred = rf.predict_proba(X_test)
print('Random Forests roc-auc: {}'.format(roc_auc_score(y_test, pred[:,1])))
# evaluate performance of algorithm built
# using imbalanced dataset
run_randomForests(X_train,
X_test,
y_train,
y_test)
# evaluate performance of algorithm built
# using the NCR undersampled dataset
run_randomForests(X_ncr,
X_test,
y_ncr,
y_test)
```
There was a tiny improvement in performance.
**HOMEWORK**
- Change the number of neighbours and compare the results in terms of data size, distributions and model performance. Also change the selection method from `all` to `mode` and evaluate the same aspects (a starting sketch follows below).
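As a hedged sketch for this homework (the settings below are illustrative assumptions), you could resample the training set with a few configurations and reuse `run_randomForests` to compare performance:
```
# Sketch: evaluate a few NCR configurations on the real dataset
for k in [3, 5]:
    for sel in ['all', 'mode']:
        ncr_tmp = NeighbourhoodCleaningRule(
            sampling_strategy='auto', n_neighbors=k, kind_sel=sel,
            n_jobs=4, threshold_cleaning=0.1)
        X_tmp, y_tmp = ncr_tmp.fit_resample(X_train, y_train)
        print(f'\nn_neighbors={k}, kind_sel={sel}, resampled shape={X_tmp.shape}')
        run_randomForests(X_tmp, X_test, y_tmp, y_test)
```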
## KNN CLASSIFIER ( K-nearest neighbours )
### Julian Ferres - Student ID (Padrón) 101483
## Problem statement
We have $n$ points, half of which belong to $class$ $0$ and the remaining half to $class$ $1$.
Using the *k nearest neighbours* $(KNN)$ rule,
classify some points (generated uniformly at random) in the square region $\left[-4;4\right] \times \left[-4;4\right]$.
## Solution
```
#Import libraries
import numpy as np
from scipy.spatial import ConvexHull
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
#Plots
import matplotlib.pyplot as plt
%matplotlib inline
n = 10000 # sample size
train = np.zeros((n,3)) # placeholder; the training matrix is rebuilt below from the two normal samples
```
#### Drawing the training sample (train)
```
mean0 = [-1, 0]
mean1 = [1, 0]
cov = [[1, 0], [0, 1]] # diagonal covariance
x1, y1 = np.random.multivariate_normal(mean0, cov, int(n/2)).T
x2, y2 = np.random.multivariate_normal(mean1, cov, int(n/2)).T
clase = np.concatenate( (np.zeros(int(n/2)), np.ones(int(n/2))) )
normal_0 = np.column_stack((x1, y1))
normal_1 = np.column_stack((x2, y2))
```
### Points to classify
```
#Number of points to classify
m= 1000
x , y = np.random.uniform(-4,4,m) , np.random.uniform(-4,4,m)
u_test_points = np.column_stack(( x, y )) #Uniform test points
u_test_points.shape
```
### Displaying the points with their convex hull
```
hull = ConvexHull(u_test_points)
hull0 = ConvexHull(normal_0[:,:2])
hull1 = ConvexHull(normal_1[:,:2])
plt.figure(figsize = (5,5))
plt.scatter(u_test_points[:,0], u_test_points[:,1], color='lightgreen' , alpha = 0.5)
for simplex in hull.simplices:
    plt.plot(u_test_points[simplex, 0], u_test_points[simplex, 1], '-', color = 'green')
plt.figure(figsize=(8, 8))
plt.scatter( x1, y1, alpha=0.5, color='darkviolet' , label = 'Class 0');
plt.scatter( x2, y2, alpha=0.5, color='yellow' , label = 'Class 1');
plt.legend()
plt.show()
```
## KNN Classifier
```
train = np.concatenate((normal_0 , normal_1))
neigh = KNeighborsClassifier(n_neighbors=1)
neigh.fit(train, clase)
output = np.zeros(m)
idx = 0
for i in u_test_points:
output[idx] = (neigh.predict([i]))
idx+=1
salida = pd.DataFrame({'xpoints' : u_test_points[:,0], 'ypoints' : u_test_points[:,1] , 'clase' : output})
idx0 = (salida['clase'] == 0 )
idx1 = (salida['clase'] == 1 )
clase_0 = salida.loc[idx0]
clase_1 = salida.loc[idx1]
plt.figure(figsize = (8,8))
plt.scatter( clase_0.xpoints , clase_0.ypoints , alpha=0.5, color='darkviolet' , label = 'Class 0');
plt.scatter( clase_1.xpoints , clase_1.ypoints , alpha=0.5, color='yellow' , label = 'Class 1');
plt.legend()
plt.show()
```
## Bonus: Test with points identically distributed to the training data
### Points to classify
```
#Number of points to classify
m1=1000
```
#### Test points, from both class 0 and class 1
```
t_x1, t_y1 = np.random.multivariate_normal(mean0, cov, m1).T
t_x2, t_y2 = np.random.multivariate_normal(mean1, cov, m1).T
n0_test = np.column_stack((t_x1, t_y1))
n1_test = np.column_stack((t_x2, t_y2))
```
#### As an estimator of the loss of the classifier $\Phi$ we take:
$$ \widehat{L}_{n_{test}} (\Phi)= \frac{1}{n_{test}} \sum_{i=1}^{n_{test}} 1\{ \Phi(X_i) \neq Y_i \}$$
where $X_i$ is the random feature vector and $Y_i$ its corresponding label.
```
prediction_n0 = np.zeros(m1)
idx = 0
for i in n0_test:
prediction_n0[idx] = (neigh.predict([i]))
idx+=1
error_n0 = prediction_n0.mean()
error_n0
prediction_n1 = np.zeros(m1)
idx = 0
for i in n1_test:
prediction_n1[idx] = (neigh.predict([i]))
idx+=1
error_n1 = 1 - prediction_n1.mean()
error_n1
```
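Since both test samples contain `m1` points, the overall empirical loss defined above can be estimated by averaging the two per-class error rates. A minimal sketch (this cell is an addition, not part of the original notebook):
```
# Overall empirical loss: average of the per-class error rates,
# valid here because both test samples have the same size m1
overall_error = 0.5 * (error_n0 + error_n1)
overall_error
```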
Link to the GitHub repo: https://github.com/julianferres/Aprendizaje-Estadistico.git
```
import pandas as pd
import sonify
import numpy as np
#calculates average for each day in a span of 7 days
#96 represents the number of updates each day (every 15 minutes
#means 4 per hour -> 4*24 = 96 per day)
def average_data_week(data):
day_averages = np.array([])
for a in range(0,96*7,96):
week = data[a:a+96]
avg = week.mean()
day_averages = np.append(day_averages, avg)
return day_averages
#calculates the average for each week in the last year (52 weeks)
#returns 52 data points, one per week (turns out to be around 25 seconds of sound)
#OUR METHOD FOR YEAR DATA
def data_year(data):
week_avgs = np.array([])
for a in range(0, 96*7*52, 96*7):
week = data[a:a+96*7]
avg = week.mean()
week_avgs = np.append(week_avgs, avg)
return week_avgs
#splits the day in 6 parts, 96 data points per day, day split by 16 for weekly data
#returns 42 data points
#OUR METHOD FOR WEEK DATA
def data_week(data):
week_avg = np.array([])
for a in range(0, 16*6*7, 16):
week = data[a:a+16]
avg = week.mean()
week_avg = np.append(week_avg, avg)
return week_avg
#splits the day in 2 parts, 96 data points per day, split by 42 for monthly data
#returns 60 data points
#OUR METHOD FOR MONTH DATA
def data_month(data):
month_avg = np.array([])
for a in range(0, 42*2*30, 42):
month = data[a:a+42]
avg = month.mean()
month_avg = np.append(month_avg, avg)
return month_avg
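# Added sketch (not in the original notebook): sanity-check the chunk counts
# produced by the averaging helpers above, using a synthetic year of readings
_probe = pd.Series(np.arange(96 * 7 * 52, dtype=float))
assert len(average_data_week(_probe)) == 7   # 7 daily averages for one week
assert len(data_year(_probe)) == 52          # 52 weekly averages for one year
assert len(data_week(_probe)) == 42          # 42 four-hour averages for one week
assert len(data_month(_probe)) == 60         # 60 chunk averages for one month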
#reading the data
water_data = pd.read_csv('adaSensorData.csv')
#initializing the parameters
temp = water_data['Temp']
ph = water_data['pH']
cond = water_data['Cond']
dopct = water_data['DOpct']
turb = water_data['Turb']
sal = water_data['Sal']
#initializing the weekly averages for each parameter - short version
temp_weekly_avg = average_data_week(temp)
ph_weekly_avg = average_data_week(ph)
cond_weekly_avg = average_data_week(cond)
#rescale conductivity (divide by 10) and temperature (multiply by 10)
#so the values fall into a playable, readable range
cond_weekly_avg = [cond / 10 for cond in cond_weekly_avg]
temp_weekly_avg = [temp * 10 for temp in temp_weekly_avg]
dopct_weekly_avg = average_data_week(dopct)
turb_weekly_avg = average_data_week(turb)
turb_weekly_avg = [turb / 25 for turb in turb_weekly_avg]
sal_weekly_avg = average_data_week(sal)
#getting the weekly averages into a playable form - short version
index = [0,1,2,3,4,5,6] #7 indexes = 7 notes played for each
temp_data = list(zip(index, temp_weekly_avg))
ph_data = list(zip(index, ph_weekly_avg))
cond_data = list(zip(index, cond_weekly_avg))
dopct_data = list(zip(index, dopct_weekly_avg))
turb_data = list(zip(index, turb_weekly_avg))
sal_data = list(zip(index, sal_weekly_avg))
#initializing the year average
temp_year = data_year(temp)
ph_year = data_year(ph)
cond_year = data_year(cond)
dopct_year = data_year(dopct)
turb_year = data_year(turb)
sal_year = data_year(sal)
#initializing the extended weekly averages
temp_week = data_week(temp)
ph_week = data_week(ph)
cond_week = data_week(cond)
dopct_week = data_week(dopct)
turb_week = data_week(turb)
sal_week = data_week(sal)
#initializing the monthly averages
temp_month = data_month(temp)
ph_month = data_month(ph)
cond_month = data_month(cond)
dopct_month = data_month(dopct)
turb_month = data_month(turb)
sal_month = data_month(sal)
#saving the data into the right format for sonification
#index values
week_index = []
for i in range(0, 42):
week_index.append(i)
month_index = []
for i in range(0, 60):
month_index.append(i)
year_index = []
for i in range(0, 52):
year_index.append(i)
#temperature data
temp_week_data = list(zip(week_index, temp_week))
temp_month_data =list(zip(month_index, temp_month))
temp_year_data = list(zip(year_index, temp_year))
#ph data
ph_week_data = list(zip(week_index, ph_week))
ph_month_data =list(zip(month_index, ph_month))
ph_year_data = list(zip(year_index, ph_year))
#conductivity data
cond_week_data = list(zip(week_index, cond_week))
cond_month_data =list(zip(month_index, cond_month))
cond_year_data = list(zip(year_index, cond_year))
#dissolved oxygen percentage data
dopct_week_data = list(zip(week_index, dopct_week))
dopct_month_data =list(zip(month_index, dopct_month))
dopct_year_data = list(zip(year_index, dopct_year))
#turbidity data
turb_week_data = list(zip(week_index, turb_week))
turb_month_data =list(zip(month_index, turb_month))
turb_year_data = list(zip(year_index, turb_year))
#salinity data
sal_week_data = list(zip(week_index, sal_week))
sal_month_data =list(zip(month_index, sal_month))
sal_year_data = list(zip(year_index, sal_year))
#playing temperature data - short week
sonify.play_midi_from_data(['voice oohs'] + temp_data, key = 'c_major')
#playing pH data - short week
sonify.play_midi_from_data(['blown bottle'] + ph_data, key = 'c_major')
#playing conductivity data - short week
sonify.play_midi_from_data(['tremolo strings'] + cond_data, key = 'c_major')
#playing dissolved oxygen percentage data - short week
sonify.play_midi_from_data(['flute'] + dopct_data, key = 'c_major')
#playing turbidity data - short week
sonify.play_midi_from_data(['electric guitar (jazz)'] + turb_data, key = 'c_major')
#playing salinity data - short week
sonify.play_midi_from_data(['bassoon'] + sal_data, key = 'c_major')
#playing all parameters (7 days in a week) - short version
temp_sound = ['voice oohs'] + temp_data
ph_sound = ['blown bottle'] + ph_data
cond_sound = ['tremolo strings'] + cond_data
dopct_sound = ['flute'] + dopct_data
turb_sound = ['electric guitar (jazz)'] + turb_data
sal_sound = ['bassoon'] + sal_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'c_major')
#playing temperature data - extended week
sonify.play_midi_from_data(['choir aahs'] + temp_week_data, 'g_major')
#playing ph data - extended week
sonify.play_midi_from_data(['acoustic grand piano'] + ph_week_data, 'g_major')
#playing conductivity data - extended week
sonify.play_midi_from_data(['harpsichord'] + cond_week_data, 'g_major')
#playing dissolved oxygen percentage data - extended week
sonify.play_midi_from_data(['fiddle'] + dopct_week_data, 'g_major')
#playing turbidity data - extended week
sonify.play_midi_from_data(['cello'] + turb_week_data, 'g_major')
#playing salinity data - extended week
sonify.play_midi_from_data(['viola'] + sal_week_data, 'g_major')
#playing all parameters (extended week)
temp_sound = ['choir aahs'] + temp_week_data
ph_sound = ['acoustic grand piano'] + ph_week_data
cond_sound = ['harpsichord'] + cond_week_data
dopct_sound = ['fiddle'] + dopct_week_data
turb_sound = ['cello'] + turb_week_data
sal_sound = ['viola'] + sal_week_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'g_major')
#playing temperature month data
sonify.play_midi_from_data(['voice oohs'] + temp_month_data, 'c_major')
#playing ph month data
sonify.play_midi_from_data(['synth voice'] + ph_month_data, 'c_major')
#playing conductivity month data
sonify.play_midi_from_data(['choir aahs'] + cond_month_data, 'c_major')
#playing dissolved oxygen percentage month data
sonify.play_midi_from_data(['lead 6 (voice)'] + dopct_month_data, 'c_major')
#playing turbidity month data
sonify.play_midi_from_data(['synth voice'] + turb_month_data, 'c_major')
#playing salinity month data
sonify.play_midi_from_data(['choir aahs'] + sal_month_data, 'c_major')
#playing all parameters (month) *
temp_sound = ['voice oohs'] + temp_month_data
ph_sound = ['synth voice'] + ph_month_data
cond_sound = ['choir aahs'] + cond_month_data
dopct_sound = ['lead 6 (voice)'] + dopct_month_data
turb_sound = ['synth voice'] + turb_month_data
sal_sound = ['choir aahs'] + sal_month_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'c_major')
#playing all parameters (month)
temp_sound = ['voice oohs'] + temp_month_data
ph_sound = ['synth voice'] + ph_month_data
cond_sound = ['choir aahs'] + cond_month_data
dopct_sound = ['whistle'] + dopct_month_data
turb_sound = ['synth voice'] + turb_month_data
sal_sound = ['choir aahs'] + sal_month_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'c_major')
#playing temperature year data
sonify.play_midi_from_data(['voice oohs'] + temp_year_data, 'c_major')
#playing ph year data
sonify.play_midi_from_data(['flute'] + ph_year_data, 'c_major')
#playing conductivity year data
sonify.play_midi_from_data(['electric guitar (jazz)'] + cond_year_data, 'c_major')
#playing dissolved oxygen year data
sonify.play_midi_from_data(['trombone'] + dopct_year_data, 'c_major')
#playing turbidity year data
sonify.play_midi_from_data(['synth voice'] + turb_year_data, 'c_major')
#playing salinity year data
sonify.play_midi_from_data(['electric piano 1'] + sal_year_data, 'c_major')
#playing all parameters (52 weeks in a year) ***
temp_sound = ['voice oohs'] + temp_year_data
ph_sound = ['flute'] + ph_year_data
cond_sound = ['electric guitar (jazz)'] + cond_year_data
dopct_sound = ['trombone'] + dopct_year_data
turb_sound = ['synth voice'] + turb_year_data
sal_sound = ['electric piano 1'] + sal_year_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'c_major')
#playing all parameters (52 weeks in a year)
temp_sound = ['voice oohs'] + temp_year_data
ph_sound = ['flute'] + ph_year_data
cond_sound = ['electric guitar (jazz)'] + cond_year_data
dopct_sound = ['trombone'] + dopct_year_data
turb_sound = ['synth voice'] + turb_year_data
sal_sound = ['electric piano 1'] + sal_year_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'g_major')
#playing all parameters (52 weeks in a year) - semi-joke
temp_sound = ['bagpipe'] + temp_year_data
ph_sound = ['banjo'] + ph_year_data
cond_sound = ['kalimba'] + cond_year_data
dopct_sound = ['shakuhachi'] + dopct_year_data
turb_sound = ['cabasa'] + turb_year_data
sal_sound = ['whistle'] + sal_year_data
input_data = [
temp_sound, ph_sound, cond_sound, dopct_sound, turb_sound, sal_sound
]
sonify.play_midi_from_data(input_data, track_type='multiple', key = 'c_major')
```
# Today the folks at ENS take us for a ride
https://challengedata.ens.fr/en/challenge/39/prediction_of_transaction_claims_status.html
This is the notebook dedicated to handling the class imbalance. We do not touch the `test set`.
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
#load packages
import sys #access to system parameters https://docs.python.org/3/library/sys.html
print("Python version: {}". format(sys.version))
import pandas as pd #collection of functions for data processing and analysis modeled after R dataframes with SQL like features
print("pandas version: {}". format(pd.__version__))
import matplotlib #collection of functions for scientific and publication-ready visualization
print("matplotlib version: {}". format(matplotlib.__version__))
import numpy as np #foundational package for scientific computing
print("NumPy version: {}". format(np.__version__))
import scipy as sp #collection of functions for scientific computing and advance mathematics
print("SciPy version: {}". format(sp.__version__))
import IPython
from IPython import display #pretty printing of dataframes in Jupyter notebook
print("IPython version: {}". format(IPython.__version__))
import sklearn #collection of machine learning algorithms
print("scikit-learn version: {}". format(sklearn.__version__))
import seaborn as sns
print("seaborn version: {}". format(sns.__version__))
#misc libraries
import time
import random as rnd
import os, gc
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
print('-'*25)
```
Here is what I get:
Python version: 3.6.3 |Anaconda custom (64-bit)| (default, Oct 15 2017, 03:27:45) [MSC v.1900 64 bit (AMD64)]
pandas version: 0.22.0
matplotlib version: 2.1.2
NumPy version: 1.12.1
SciPy version: 1.0.0
IPython version: 6.2.1
scikit-learn version: 0.19.1
seaborn version: 0.8.1
# Setting the random seed
Very important so that we both see the same things on our two machines
```
RANDOM_SEED = 42;
np.random.seed(RANDOM_SEED)
# Run "! pip install jyquickhelper" in a cell if the following command does not work
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
```
## Loading the data
```
# Where to save the figures
PROJECT_ROOT_DIR = "."
DATA_PROCESSED = os.path.join(PROJECT_ROOT_DIR, "data_processed")
def load_data(file,data_path=DATA_PROCESSED, sep=','):
csv_path = os.path.join(data_path, file)
return pd.read_csv(csv_path, sep)
train_df = load_data(file = "train_notebook_3.csv", sep=';');
test_df = load_data(file = "test_notebook_3.csv", sep=';');
```
## Analysing the data types
```
train_df.info()
```
## Plan of action
Since I am a newbie with imbalanced data, we will proceed step by step:
1. Test oversampling with SMOTE then ADASYN
2. Combine over- and under-sampling
3. Ensemble of samplers
### Testing oversampling with SMOTE then ADASYN
While the `RandomOverSampler` is over-sampling by duplicating some of the original samples of the minority class, `SMOTE` and `ADASYN` generate new samples by interpolation.
However, the samples used to interpolate/generate new synthetic samples differ. In fact, `ADASYN` focuses on generating samples next to the original samples which are wrongly classified using a `k-Nearest Neighbors` classifier while the basic implementation of `SMOTE` will not make any distinction between easy and hard samples to be classified using the nearest neighbors rule.
Therefore, the decision function found during training will be different among the algorithms.
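A quick hedged sketch of this difference on a toy dataset (this cell is an addition; whether the method is called `fit_resample` or `fit_sample` depends on your imbalanced-learn version):
```
from sklearn.datasets import make_classification
from imblearn.over_sampling import SMOTE, ADASYN
# Toy imbalanced dataset, only to compare the class counts the two samplers produce
X_toy, y_toy = make_classification(n_samples=2000, weights=[0.95], random_state=0)
for sampler in (SMOTE(), ADASYN()):
    X_r, y_r = sampler.fit_resample(X_toy, y_toy)  # use fit_sample on older releases
    print(type(sampler).__name__, np.bincount(y_r))
```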
```
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn.preprocessing import LabelEncoder
y=train_df["CLAIM_TYPE"]
X=train_df.drop("CLAIM_TYPE", axis=1, inplace=False)
le_SHIPPING_MODE = LabelEncoder()
X.SHIPPING_MODE=le_SHIPPING_MODE.fit_transform(X.SHIPPING_MODE)
le_SELLER_COUNTRY = LabelEncoder()
X.SELLER_COUNTRY=le_SELLER_COUNTRY.fit_transform(X.SELLER_COUNTRY)
le_PRODUCT_TYPE = LabelEncoder()
X.PRODUCT_TYPE=le_PRODUCT_TYPE.fit_transform(X.PRODUCT_TYPE)
le_PRODUCT_FAMILY = LabelEncoder()
X.PRODUCT_FAMILY=le_PRODUCT_FAMILY.fit_transform(X.PRODUCT_FAMILY)
X.shape
y.value_counts()
X_resampled, y_resampled = SMOTE().fit_sample(X, y)
X_resampled = pd.DataFrame(X_resampled, columns=X.columns)
y_resampled = pd.Series(y_resampled, name="CLAIM_TYPE")
X_resampled.shape
y_resampled.value_counts()
X_resampled.info()
```
Magnificent, we now have nicely balanced classes, but I have no idea whether this is a good sampling or not.
Let's just say it is; in any case, it gives us the baseline for oversampling. Now we put the categorical columns back as they should be.
```
X_resampled['SHIPPING_MODE']=le_SHIPPING_MODE.inverse_transform(X_resampled['SHIPPING_MODE'].round().astype(int))
X_resampled['SELLER_COUNTRY']=le_SELLER_COUNTRY.inverse_transform(X_resampled.SELLER_COUNTRY.round().astype(int))
X_resampled['PRODUCT_TYPE']=le_PRODUCT_TYPE.inverse_transform(X_resampled.PRODUCT_TYPE.round().astype(int))
X_resampled['PRODUCT_FAMILY']=le_PRODUCT_FAMILY.inverse_transform(X_resampled.PRODUCT_FAMILY.round().astype(int))
X_resampled["CLAIM_TYPE"]=y_resampled
X_resampled.info()
```
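Note that SMOTE interpolates the label-encoded categorical columns as if they were numeric, and the rounding + `inverse_transform` step above only approximately repairs that. A hedged alternative sketch using `SMOTENC`, which handles categorical features natively (available in more recent imbalanced-learn releases; the column list simply mirrors the encoders above):
```
from imblearn.over_sampling import SMOTENC
# Positions of the categorical columns in X (the four label-encoded ones above)
cat_idx = [X.columns.get_loc(c) for c in
           ['SHIPPING_MODE', 'SELLER_COUNTRY', 'PRODUCT_TYPE', 'PRODUCT_FAMILY']]
smote_nc = SMOTENC(categorical_features=cat_idx, random_state=RANDOM_SEED)
X_nc, y_nc = smote_nc.fit_resample(X, y)  # fit_sample on older releases
pd.Series(y_nc).value_counts()
```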
We write out our results
```
filename_train_balanced = DATA_PROCESSED+"/train_balanced.csv"
filename_train = DATA_PROCESSED+"/train_notebook_4.csv"
filename_test = DATA_PROCESSED+"/test_notebook_4.csv"
try:
os.remove(filename_train)
os.remove(filename_test)
os.remove(filename_train_balanced)
except:
pass;
X_resampled.to_csv(filename_train_balanced, index=False, sep=";")
train_df.to_csv(filename_train, index=False, sep=";")
test_df.to_csv(filename_test, index=False, sep=";")
```
# Reading and Writing Parquet Files
- [Overview](#overview)
- [Setup](#setup)
- [Write Data to a Parquet Table in the Platform](#write-data-to-a-parquet-platform-table)
- [Advanced Usage](#advanced-usage)
- [Cleanup](#cleanup)
<a id="overview"></a>
## Overview
[Parquet](https://parquet.apache.org/) is a columnar storage format that provides high-density high-performance file organization.
The following section demonstrates how to create and write data to a Parquet table in the Iguazio Data Science Platform ("the platform") and read its contents.
For information about reading and writing Parquet files from Python applications, see https://arrow.apache.org/docs/python/parquet.html.
<a id="setup"></a>
## Setup
Run the following code to import required libraries and ingest CSV data into a pandas DataFrame, which will be converted to a Parquet table in the next step.
```
import pyarrow.parquet as pq
import pandas as pd
import pyarrow as pa
# Read a CSV file into a pandas DataFrame and display the data and metadata that was read
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data', header = None)
# Specify columns extracted from wbdc.names
data.columns = ["id","diagnosis","radius_mean","texture_mean","perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave points_mean","symmetry_mean","fractal_dimension_mean",
"radius_se","texture_se","perimeter_se","area_se","smoothness_se","compactness_se","concavity_se",
"concave points_se","symmetry_se","fractal_dimension_se","radius_worst","texture_worst",
"perimeter_worst","area_worst","smoothness_worst","compactness_worst","concavity_worst",
"concave points_worst","symmetry_worst","fractal_dimension_worst"]
data.head()
```
<a id="write-data-to-a-parquet-platform-table"></a>
## Write Data to a Parquet Table in the Platform
Write the CSV data that was read into the pandas DataFrame to a Parquet table in a platform data container (i.e., in the distributed file system of the Iguazio Data Science Platform).
> **Note:** For information about using the `v3io` or `User` data mounts to reference data in the platform's data containers, see [Platform Data Containers](collect-n-explore.ipynb/#platform-data-containers) in the **getting-started/collect-n-explore.ipynb** notebook.
```
# Write the pandas DataFrame to a new Parquet table in the platform's file system
import os
filepath = os.path.join('/User/examples/parquet_example')
table = pa.Table.from_pandas(data)
pq.write_table(table, filepath)
# Read the Parquet table to local memory and print the head
table2 = pq.read_table(filepath)
table2.to_pandas().head()
```
<a id="advanced-usage"></a>
## Advanced Usage
```
# Read specific columns (attributes) from the Parquet table to save bandwidth/memory and accelerate load
pq.read_table(filepath, columns=['id', 'area_mean']).to_pandas().head()
# For comparison, read the same columns with read_pandas, which also loads the pandas-specific metadata stored in the Parquet table
pq.read_pandas(filepath, columns=['id', 'area_mean']).to_pandas().head()
# Read the table as a Parquet file and display its schema
parquet_file = pq.ParquetFile(filepath)
parquet_file.schema
# Display the table metadata stored in the Parquet file
parquet_file.metadata
# Write data to multiple table partitions
pq.write_to_dataset(table, root_path='my_prqt', partition_cols=['radius_mean'])
# Read the partitioned table
table3 = pq.read_table('my_prqt')
table3.to_pandas().head()
# Read data from the table using multiple threads to accelerate performance
pq.read_table('my_prqt', use_threads=True).to_pandas().head()
```
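Parquet also supports per-column compression, which is part of what gives it its high storage density. A minimal sketch, assuming the default pyarrow codecs are available (the output path is just an illustrative variant of the one used above):
```
# Write the same table with an explicit compression codec
# (snappy is the default; gzip trades write speed for smaller files)
pq.write_table(table, '/User/examples/parquet_example_gzip', compression='gzip')
pq.ParquetFile('/User/examples/parquet_example_gzip').metadata
```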
<a id="cleanup"></a>
## Cleanup
```
# Uncomment the following line and run the code to delete the example directory
#!rm -rf $HOME/examples/parquet_example/
```
# Introduction
This notebook illustrates how to access a DB2 database on Cloud using Python by following the steps below:
1. Import the `ibm_db` Python library
1. Enter the database connection credentials
1. Create the database connection
1. Close the database connection
__Note:__ Please follow the instructions given in the first Lab of this course to create a database service instance of Db2 on Cloud and retrieve your database service credentials.
## Import the `ibm_db` Python library
The `ibm_db` [API ](https://pypi.python.org/pypi/ibm_db/) provides a variety of useful Python functions for accessing and manipulating data in an IBM® data server database, including functions for connecting to a database, preparing and issuing SQL statements, fetching rows from result sets, calling stored procedures, committing and rolling back transactions, handling errors, and retrieving metadata.
We first import the ibm_db library into our Python Application
Execute the following cell by clicking within it and then
press `Shift` and `Enter` keys simultaneously
```
import ibm_db
```
When the command above completes, the `ibm_db` library is loaded in your notebook.
## Identify the database connection credentials
Connecting to a dashDB or DB2 database requires the following information:
* Driver Name
* Database name
* Host DNS name or IP address
* Host port
* Connection protocol
* User ID (or username)
* User Password
__Notice:__ To obtain credentials please refer to the instructions given in the first Lab of this course
Now enter your database credentials below and execute the cell with `Shift` + `Enter`
```
#Replace the placeholder values with your actual Db2 hostname, username, and password:
dsn_hostname = "dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net" # e.g.: "dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net"
dsn_uid = "cps24149" # e.g. "abc12345"
dsn_pwd = "53p6+1xfzsj8bpjk" # e.g. "7dBZ3wWt9XN6$o0J"
dsn_driver = "DATABASE=BLUDB;HOSTNAME=dashdb-txn-sbox-yp-dal09-03.services.dal.bluemix.net;PORT=50000;PROTOCOL=TCPIP;UID=cps24149;PWD=53p6+1xfzsj8bpjk;"
dsn_database = "BLUDB" # e.g. "BLUDB"
dsn_port = "50000" # e.g. "50000"
dsn_protocol = "TCPIP" # i.e. "TCPIP"
```
## Create the DB2 database connection
The ibm_db API uses the IBM Data Server Driver for ODBC and CLI APIs to connect to IBM DB2 and Informix.
Let's build the dsn connection string using the credentials you entered above.
```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create the dsn connection string
dsn = (
"DRIVER={0};"
"DATABASE={1};"
"HOSTNAME={2};"
"PORT={3};"
"PROTOCOL={4};"
"UID={5};"
"PWD={6};").format(dsn_driver, dsn_database, dsn_hostname, dsn_port, dsn_protocol, dsn_uid, dsn_pwd)
#print the connection string to check correct values are specified
print(dsn)
```
Now establish the connection to the database
```
#DO NOT MODIFY THIS CELL. Just RUN it with Shift + Enter
#Create database connection
try:
conn = ibm_db.connect(dsn, "", "")
print ("Connected to database: ", dsn_database, "as user: ", dsn_uid, "on host: ", dsn_hostname)
except:
print ("Unable to connect: ", ibm_db.conn_errormsg() )
```
Congratulations if you were able to connect successfully. Otherwise check the error and try again.
```
#Retrieve Metadata for the Database Server
server = ibm_db.server_info(conn)
print ("DBMS_NAME: ", server.DBMS_NAME)
print ("DBMS_VER: ", server.DBMS_VER)
print ("DB_NAME: ", server.DB_NAME)
#Retrieve Metadata for the Database Client / Driver
client = ibm_db.client_info(conn)
print ("DRIVER_NAME: ", client.DRIVER_NAME)
print ("DRIVER_VER: ", client.DRIVER_VER)
print ("DATA_SOURCE_NAME: ", client.DATA_SOURCE_NAME)
print ("DRIVER_ODBC_VER: ", client.DRIVER_ODBC_VER)
print ("ODBC_VER: ", client.ODBC_VER)
print ("ODBC_SQL_CONFORMANCE: ", client.ODBC_SQL_CONFORMANCE)
print ("APPL_CODEPAGE: ", client.APPL_CODEPAGE)
print ("CONN_CODEPAGE: ", client.CONN_CODEPAGE)
```
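Before closing, the same connection can also be used to issue SQL statements and fetch rows, as listed in the ibm_db API overview above. A minimal hedged sketch against the standard Db2 dummy table `SYSIBM.SYSDUMMY1` (queries against your own tables follow the same pattern):
```
# Issue a simple SQL statement and fetch the result rows
sql = "SELECT 1 AS ONE FROM SYSIBM.SYSDUMMY1"
stmt = ibm_db.exec_immediate(conn, sql)
row = ibm_db.fetch_assoc(stmt)
while row:
    print(row)
    row = ibm_db.fetch_assoc(stmt)
```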
## Close the Connection
We free all resources by closing the connection. Remember that it is always important to close connections so that we can avoid unused connections taking up resources.
```
ibm_db.close(conn)
```
## Summary
In this tutorial you established a connection to a DB2 database on Cloud database from a Python notebook using ibm_db API.
Copyright © 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
```
from glob import glob
import json
import numpy as np
import pandas as pd
pd.set_option('max_colwidth', 200)
snips_files_train = glob('../data/snips/2017-06-custom-intent-engines/*/train*full.json')
snips_files_test = glob('../data/snips/2017-06-custom-intent-engines/*/validate*.json')
def snips2top(snips_example, intent):
"""Convert Snips format to TOP format
Args:
snips_example: dict, one example following snips example format
intent: str
Returns:
query_text, top_format_schema
"""
query_text = ''
top_format_str = f'[IN:{intent.upper()}'
for text_chunk in snips_example:
text = text_chunk["text"].strip(' ')
if "entity" in text_chunk:
entity_name = text_chunk["entity"].upper()
top_format_str += f" [SL:{entity_name} {text} ]"
else:
top_format_str += ' ' + text
query_text += ' ' + text
query_text = query_text.strip(' ')
top_format_str += " ]"
return query_text, top_format_str
ex1 = [
{'text': 'What will the weather be '},
{'text': 'this year', 'entity': 'timeRange'},
{'text': ' in '},
{'text': 'Horseshoe Lake State Fish and Wildlife Area',
'entity': 'geographic_poi'},
{'text': '?'}
]
intent1 = 'GetWeather'
expected_out = ("What will the weather be this year in Horseshoe Lake State Fish and Wildlife Area ?", "[IN:GETWEATHER What will the weather be [SL:TIMERANGE this year ] in [SL:GEOGRAPHIC_POI Horseshoe Lake State Fish and Wildlife Area ] ? ]")
out = snips2top(ex1, intent1)
assert out == expected_out, out
def make_snips_df(snips_files):
snips_data = []
for train_file in snips_files:
with open(train_file, encoding='latin-1') as f:
data = json.load(f)
assert len(data.keys()) == 1, data.keys()
intent = list(data.keys())[0]
print(f'Intent: {intent}, n examples: {len(data[intent])}')
for example in data[intent]:
assert len(example.keys()) == 1
text, schema = snips2top(example['data'], intent)
snips_data.append([text, text, schema])
snips_df = pd.DataFrame(snips_data, columns=['text', 'tokens', 'schema'])
return snips_df
print('Processing train')
snips_trainval = make_snips_df(glob('../data/snips/2017-06-custom-intent-engines/*/train*full.json'))
print()
print('Processing test')
snips_test = make_snips_df(glob('../data/snips/2017-06-custom-intent-engines/*/validate*.json'))
permutation = np.random.permutation(len(snips_trainval))
train_subset_ids = permutation[700:]
valid_subset_ids = permutation[:700]
snips_train = snips_trainval.iloc[train_subset_ids]
snips_valid = snips_trainval.iloc[valid_subset_ids]
print('Train dataset len: ', len(snips_train))
print('Valid dataset len: ', len(snips_valid))
print('Test dataset len: ', len(snips_test))
import os
from os.path import join as path_join
snips_path = path_join('snips', 'top_format')
os.makedirs(snips_path)
snips_train.to_csv(os.path.join(snips_path, 'train.tsv'), sep='\t', index=False, header=False)
snips_valid.to_csv(os.path.join(snips_path, 'eval.tsv'), sep='\t', index=False, header=False)
snips_test.to_csv(os.path.join(snips_path, 'test.tsv'), sep='\t', index=False, header=False)
```
|
github_jupyter
|
from glob import glob
import json
import numpy as np
import pandas as pd
pd.set_option('max_colwidth', 200)
snips_files_train = glob('../data/snips/2017-06-custom-intent-engines/*/train*full.json')
snips_files_test = glob('../data/snips/2017-06-custom-intent-engines/*/validate*.json')
def snips2top(snips_example, intent):
"""Convert Snips format to TOP format
Args:
snips_example: dict, one example following snips example format
intent: str
Returns:
query_text, top_format_schema
"""
query_text = ''
top_format_str = f'[IN:{intent.upper()}'
for text_chunk in snips_example:
text = text_chunk["text"].strip(' ')
if "entity" in text_chunk:
entity_name = text_chunk["entity"].upper()
top_format_str += f" [SL:{entity_name} {text} ]"
else:
top_format_str += ' ' + text
query_text += ' ' + text
query_text = query_text.strip(' ')
top_format_str += " ]"
return query_text, top_format_str
ex1 = [
{'text': 'What will the weather be '},
{'text': 'this year', 'entity': 'timeRange'},
{'text': ' in '},
{'text': 'Horseshoe Lake State Fish and Wildlife Area',
'entity': 'geographic_poi'},
{'text': '?'}
]
intent1 = 'GetWeather'
expected_out = ("What will the weather be this year in Horseshoe Lake State Fish and Wildlife Area ?", "[IN:GETWEATHER What will the weather be [SL:TIMERANGE this year ] in [SL:GEOGRAPHIC_POI Horseshoe Lake State Fish and Wildlife Area ] ? ]")
out = snips2top(ex1, intent1)
assert out == expected_out, out
def make_snips_df(snips_files):
snips_data = []
for train_file in snips_files:
with open(train_file, encoding='latin-1') as f:
data = json.load(f)
assert len(data.keys()) == 1, data.keys()
intent = list(data.keys())[0]
print(f'Intent: {intent}, n examples: {len(data[intent])}')
for example in data[intent]:
assert len(example.keys()) == 1
text, schema = snips2top(example['data'], intent)
snips_data.append([text, text, schema])
snips_df = pd.DataFrame(snips_data, columns=['text', 'tokens', 'schema'])
return snips_df
print('Processing train')
snips_trainval = make_snips_df(glob('../data/snips/2017-06-custom-intent-engines/*/train*full.json'))
print()
print('Processing test')
snips_test = make_snips_df(glob('../data/snips/2017-06-custom-intent-engines/*/validate*.json'))
permutation = np.random.permutation(len(snips_trainval))
train_subset_ids = permutation[700:]
valid_subset_ids = permutation[:700]
snips_train = snips_trainval.iloc[train_subset_ids]
snips_valid = snips_trainval.iloc[valid_subset_ids]
print('Train dataset len: ', len(snips_train))
print('Valid dataset len: ', len(snips_valid))
print('Test dataset len: ', len(snips_test))
import os
from os.path import join as path_join
snips_path = path_join('snips', 'top_format')
os.makedirs(snips_path)
snips_train.to_csv(os.path.join(snips_path, 'train.tsv'), sep='\t', index=False, header=False)
snips_valid.to_csv(os.path.join(snips_path, 'eval.tsv'), sep='\t', index=False, header=False)
snips_test.to_csv(os.path.join(snips_path, 'test.tsv'), sep='\t', index=False, header=False)
| 0.505615 | 0.297007 |
# Data Visualization Exploration
### Scalar value ideas for dashboard:
`could try to emulate https://ncov19.us/ top bar`
- avg rating of recommendations/movies you liked
- avg year of recommendations
- "Your decade" (ex 80s) generated from recommendations
- three favorite genres (or two favorite genres and one recommendation)
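A rough sketch of how a few of these scalars could be computed; it assumes a recommendations dataframe shaped like the `df` built later in this notebook (columns `avg_rating`, `year`, and comma-separated `genres`), so treat it as illustrative only:
```
# Illustrative sketch: assumes a dataframe like the `df` built below,
# with 'avg_rating', 'year' and comma-separated 'genres' columns.
import pandas as pd

def dashboard_scalars(df: pd.DataFrame) -> dict:
    decade = (int(df['year'].mean()) // 10 * 10) % 100   # e.g. 1987.3 -> 80
    genre_counts = df['genres'].dropna().str.split(',').explode().value_counts()
    return {
        'avg_rating': round(df['avg_rating'].mean(), 2),
        'avg_year': int(df['year'].mean()),
        'your_decade': f"{decade:02d}s",                  # e.g. "80s"
        'top_genres': genre_counts.head(3).index.tolist(),
    }
```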
### Chart ideas for dashboard:
- genre distribution of your watchlist or recommendation list
- avg rating for each genre in your ratings
### Random notes:
- the recommendations table should be adjusted so that each row is a single movie
 - that way movie_id is all we need to store (along with user_id)
 - then to get all movie data we can just JOIN on imdb_movies (see the sketch below)
- because of the current structure, some processes are slowed down by having to fetch movie data one movie at a time
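A hedged sketch of what that query could look like; note that the one-row-per-movie `recommendations` layout assumed here is the *proposed* schema, not the current one:
```
# Sketch only: assumes the proposed normalized layout where recommendations
# stores one row per (user_id, movie_id) pair -- NOT the current JSON schema.
proposed_query = """
    SELECT r.user_id, r.movie_id, m.genres, m.runtime_minutes
    FROM recommendations r
    JOIN imdb_movies m ON m.movie_id = r.movie_id
    WHERE r.user_id = %s;
"""
# cursor.execute(proposed_query, (user_id,))  # one round trip instead of one query per movie
```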
```
# imports
import pandas as pd
import numpy as np
import psycopg2
import matplotlib.pyplot as plt
from heapq import nlargest
import os
from dotenv import load_dotenv
# load .env file for keys
load_dotenv()
# create connection to prod DB
connection = psycopg2.connect(
user=os.getenv('db_user'),
password=os.getenv('db_password'),
host=os.getenv('db_host'),
port="5432",
database=os.getenv('db_user')
)
```
## Recommendation Exploration
```
# get all recommendations
cursor = connection.cursor()
query = 'SELECT * FROM recommendations'
cursor.execute(query)
recommendations = cursor.fetchall()
cursor.close()
len(recommendations)
# get all movies (will merge on rec df later)
cursor = connection.cursor()
postgreSQL_select_Query = "SELECT movie_id, is_adult, runtime_minutes, genres FROM imdb_movies;"
cursor.execute(postgreSQL_select_Query)
all_movies = cursor.fetchall()
len(all_movies)
# make movie_df
movie_ids = []
is_adults = []
times = []
genres = []
for movie in all_movies:
movie_ids.append(movie[0])
is_adults.append(movie[1])
times.append(movie[2])
genres.append(movie[3])
movie_df = pd.DataFrame({
'movie_id': movie_ids,
'is_adult': is_adults,
'runtime_minutes': times,
'genres': genres
})
print(movie_df.shape)
movie_df.head()
recommendations[0]
"""
user_id
recommendation_id
recommendation_json
date
model_type
"""
# store rec info in df
user_ids = []
rec_ids = []
rec_titles = []
rec_years = []
rec_ratings = []
rec_votes = []
rec_movie_ids = []
rec_gems = []
dates = []
model_types = []
"""
Could just save the movie_id
and merge a movie_data df to df made with this info
on movie_id
"""
for rec in recommendations:
for unit in rec[2]:
user_ids.append(rec[0])
rec_ids.append(rec[1])
rec_titles.append(unit['Title'])
rec_years.append(unit['Year'])
rec_ratings.append(unit['Mean Rating'])
rec_votes.append(unit['Votes'])
rec_movie_ids.append(unit['ID'])
rec_gems.append(unit['Gem'])
dates.append(rec[3])
model_types.append(rec[4])
len(user_ids)
# make df
df = pd.DataFrame({
'user_id': user_ids,
'rec_id': rec_ids,
'title': rec_titles,
'year': rec_years,
'avg_rating': rec_ratings,
'votes': rec_votes,
'movie_id': rec_movie_ids,
'gem': rec_gems,
'date': dates,
'model_type': model_types
})
print(df.shape)
df.head()
# merge with movie_df on movie_id to get additional movie info
df = pd.merge(df, movie_df, on='movie_id', how='inner')
print(df.shape)
df.head()
# get len of all movie_ids
lengths = {}
for movie_id in df['movie_id'].values:
if len(movie_id) in lengths:
lengths[len(movie_id)] += 1
else:
lengths[len(movie_id)] = 1
print(movie_id)
lengths
# no variance here, can probably drop
df['model_type'].value_counts()
df.dtypes
# replace 'None' with np.NaN
df = df.replace({'None': np.NaN})
# fix incorrect columns
df['avg_rating'] = df['avg_rating'].astype('float64')
# have to use float for these cause NaNs
df['year'] = df['year'].astype('float64')
df['votes'] = df['votes'].astype('float64')
df.dtypes
user_recs = df.groupby(by='user_id')
len(user_recs)
# get scaler values
counter = 0
for user, data in user_recs:
genre_counts = {}
print(user, data['avg_rating'].mean(), data['year'].mean())
for genre_string in data['genres'].values:
genres = genre_string.split(',')
for genre in genres:
if genre in genre_counts:
genre_counts[genre] += 1
else:
genre_counts[genre] = 1
print(genre_counts)
plt.bar(range(len(genre_counts)), genre_counts.values())
plt.xticks(range(len(genre_counts)), genre_counts.keys(), rotation=90)
plt.show()
counter += 1
if counter == 5:
break
# refactor to a function
"""
Currently gets user dashboard data
relating to recommendations made to the user
Because the movie data in recommendations is stored
in a JSON object this function is required to get the additional
movie data one by one rather than JOIN the movie data on the
initial select query. Should adjust how recommendations are stored to
improve speed
"""
def get_user_dashboard_data(user_id):
cursor = connection.cursor()
query = f"SELECT * FROM recommendations WHERE user_id = {user_id};"
cursor.execute(query)
recommendations = cursor.fetchall()
rec_ids = []
rec_titles = []
rec_years = []
rec_ratings = []
rec_votes = []
rec_movie_ids = []
# rec_gems = []
# dates = []
genres = []
# don't think it's needed
# model_types = []
for rec in recommendations:
for unit in rec[2]:
movie_query = f"SELECT genres FROM imdb_movies WHERE movie_id = '{unit['ID']}';"
cursor.execute(movie_query)
movie_data = cursor.fetchone()
rec_ids.append(rec[1])
rec_titles.append(unit['Title'])
rec_years.append(unit['Year'])
rec_ratings.append(unit['Mean Rating'])
rec_votes.append(unit['Votes'])
rec_movie_ids.append(unit['ID'])
genres.append(movie_data[0])
# rec_gems.append(unit['Gem'])
# dates.append(rec[3])
# model_types.append(rec[4])
cursor.close()
data = pd.DataFrame({
'rec_id': rec_ids,
'title': rec_titles,
'year': rec_years,
'avg_rating': rec_ratings,
'votes': rec_votes,
'movie_id': rec_movie_ids,
'genres': genres
})
genre_counts = {}
for genre_string in data['genres'].values:
genres = genre_string.split(',')
for genre in genres:
if genre in genre_counts:
genre_counts[genre] += 1
else:
genre_counts[genre] = 1
plt.bar(range(len(genre_counts)), genre_counts.values())
plt.xticks(range(len(genre_counts)), genre_counts.keys(), rotation=90)
plt.show()
top_three = nlargest(3, genre_counts, key=genre_counts.get)
for key in top_three:
print(f"{key}: {genre_counts[key]}")
return round(data['avg_rating'].mean(), 2), int(data['year'].mean())
get_user_dashboard_data(1)
```
## Rating Exploration
```
# get all ratings
cursor = connection.cursor()
query = 'SELECT user_id, name, year, rating, id FROM user_imdb_ratings;'
cursor.execute(query)
ratings = cursor.fetchall()
cursor.close()
len(ratings)
# make ratings data into df
user_ids = []
names = []
years = []
user_ratings = []
movie_ids = []
for rating in ratings:
user_ids.append(rating[0])
names.append(rating[1])
years.append(rating[2])
user_ratings.append(rating[3])
movie_ids.append(str(rating[4]))
ratings_df = pd.DataFrame({
'user_id': user_ids,
'title': names,
'year': years,
'rating': user_ratings,
'movie_id': movie_ids
})
print(ratings_df.shape)
ratings_df.head()
# get len of all movie_ids in ratings_df
lengths = {}
for movie_id in ratings_df['movie_id'].values:
if len(movie_id) in lengths:
lengths[len(movie_id)] += 1
else:
lengths[len(movie_id)] = 1
print(movie_id)
lengths
ratings_df_merged = pd.merge(ratings_df, movie_df, on='movie_id', how='inner')
print(ratings_df_merged.shape)
ratings_df_merged.head()
```
It seems that the `id` column in `user_imdb_ratings` doesn't line up with the IMDb `movie_id`, which is confusing because that table doesn't have a separate movie_id column. Worth a closer look.
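One quick way to check the suspected mismatch, reusing the `ratings_df` and `movie_df` built above (a diagnostic sketch):
```
# Diagnostic sketch: how many rating IDs actually appear in imdb_movies?
rating_ids = set(ratings_df['movie_id'])
imdb_ids = set(movie_df['movie_id'])
overlap = rating_ids & imdb_ids
print(f"{len(overlap)} of {len(rating_ids)} rating IDs match an imdb_movies movie_id")
print("sample unmatched IDs:", list(rating_ids - imdb_ids)[:5])
```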
|
github_jupyter
|
# imports
import pandas as pd
import numpy as np
import psycopg2
import matplotlib.pyplot as plt
from heapq import nlargest
import os
from dotenv import load_dotenv
# load .env file for keys
load_dotenv()
# create connection to prod DB
connection = psycopg2.connect(
user=os.getenv('db_user'),
password=os.getenv('db_password'),
host=os.getenv('db_host'),
port="5432",
database=os.getenv('db_user')
)
# get all recommendations
cursor = connection.cursor()
query = 'SELECT * FROM recommendations'
cursor.execute(query)
recommendations = cursor.fetchall()
cursor.close()
len(recommendations)
# get all movies (will merge on rec df later)
cursor = connection.cursor()
postgreSQL_select_Query = "SELECT movie_id, is_adult, runtime_minutes, genres FROM imdb_movies;"
cursor.execute(postgreSQL_select_Query)
all_movies = cursor.fetchall()
len(all_movies)
# make movie_df
movie_ids = []
is_adults = []
times = []
genres = []
for movie in all_movies:
movie_ids.append(movie[0])
is_adults.append(movie[1])
times.append(movie[2])
genres.append(movie[3])
movie_df = pd.DataFrame({
'movie_id': movie_ids,
'is_adult': is_adults,
'runtime_minutes': times,
'genres': genres
})
print(movie_df.shape)
movie_df.head()
recommendations[0]
"""
user_id
recommendation_id
recommendation_json
date
model_type
"""
# store rec info in df
user_ids = []
rec_ids = []
rec_titles = []
rec_years = []
rec_ratings = []
rec_votes = []
rec_movie_ids = []
rec_gems = []
dates = []
model_types = []
"""
Could just save the movie_id
and merge a movie_data df to df made with this info
on movie_id
"""
for rec in recommendations:
for unit in rec[2]:
user_ids.append(rec[0])
rec_ids.append(rec[1])
rec_titles.append(unit['Title'])
rec_years.append(unit['Year'])
rec_ratings.append(unit['Mean Rating'])
rec_votes.append(unit['Votes'])
rec_movie_ids.append(unit['ID'])
rec_gems.append(unit['Gem'])
dates.append(rec[3])
model_types.append(rec[4])
len(user_ids)
# make df
df = pd.DataFrame({
'user_id': user_ids,
'rec_id': rec_ids,
'title': rec_titles,
'year': rec_years,
'avg_rating': rec_ratings,
'votes': rec_votes,
'movie_id': rec_movie_ids,
'gem': rec_gems,
'date': dates,
'model_type': model_types
})
print(df.shape)
df.head()
# merge with movie_df on movie_id to get additional movie info
df = pd.merge(df, movie_df, on='movie_id', how='inner')
print(df.shape)
df.head()
# get len of all movie_ids
lengths = {}
for movie_id in df['movie_id'].values:
if len(movie_id) in lengths:
lengths[len(movie_id)] += 1
else:
lengths[len(movie_id)] = 1
print(movie_id)
lengths
# no variance here, can probably drop
df['model_type'].value_counts()
df.dtypes
# replace 'None' with np.NaN
df = df.replace({'None': np.NaN})
# fix incorrect columns
df['avg_rating'] = df['avg_rating'].astype('float64')
# have to use float for these cause NaNs
df['year'] = df['year'].astype('float64')
df['votes'] = df['votes'].astype('float64')
df.dtypes
user_recs = df.groupby(by='user_id')
len(user_recs)
# get scaler values
counter = 0
for user, data in user_recs:
genre_counts = {}
print(user, data['avg_rating'].mean(), data['year'].mean())
for genre_string in data['genres'].values:
genres = genre_string.split(',')
for genre in genres:
if genre in genre_counts:
genre_counts[genre] += 1
else:
genre_counts[genre] = 1
print(genre_counts)
plt.bar(range(len(genre_counts)), genre_counts.values())
plt.xticks(range(len(genre_counts)), genre_counts.keys(), rotation=90)
plt.show()
counter += 1
if counter == 5:
break
# refactor to a function
"""
Currently gets user dashboard data
relating to recommendations made to the user
Because the movie data in recommendations is stored
in a JSON object this function is required to get the additional
movie data one by one rather than JOIN the movie data on the
initial select query. Should adjust how recommendations are stored to
improve speed
"""
def get_user_dashboard_data(user_id):
cursor = connection.cursor()
query = f"SELECT * FROM recommendations WHERE user_id = {user_id};"
cursor.execute(query)
recommendations = cursor.fetchall()
rec_ids = []
rec_titles = []
rec_years = []
rec_ratings = []
rec_votes = []
rec_movie_ids = []
# rec_gems = []
# dates = []
genres = []
# don't think it's needed
# model_types = []
for rec in recommendations:
for unit in rec[2]:
movie_query = f"SELECT genres FROM imdb_movies WHERE movie_id = '{unit['ID']}';"
cursor.execute(movie_query)
movie_data = cursor.fetchone()
rec_ids.append(rec[1])
rec_titles.append(unit['Title'])
rec_years.append(unit['Year'])
rec_ratings.append(unit['Mean Rating'])
rec_votes.append(unit['Votes'])
rec_movie_ids.append(unit['ID'])
genres.append(movie_data[0])
# rec_gems.append(unit['Gem'])
# dates.append(rec[3])
# model_types.append(rec[4])
cursor.close()
data = pd.DataFrame({
'rec_id': rec_ids,
'title': rec_titles,
'year': rec_years,
'avg_rating': rec_ratings,
'votes': rec_votes,
'movie_id': rec_movie_ids,
'genres': genres
})
genre_counts = {}
for genre_string in data['genres'].values:
genres = genre_string.split(',')
for genre in genres:
if genre in genre_counts:
genre_counts[genre] += 1
else:
genre_counts[genre] = 1
plt.bar(range(len(genre_counts)), genre_counts.values())
plt.xticks(range(len(genre_counts)), genre_counts.keys(), rotation=90)
plt.show()
top_three = nlargest(3, genre_counts, key=genre_counts.get)
for key in top_three:
print(f"{key}: {genre_counts[key]}")
return round(data['avg_rating'].mean(), 2), int(data['year'].mean())
get_user_dashboard_data(1)
# get all ratings
cursor = connection.cursor()
query = 'SELECT user_id, name, year, rating, id FROM user_imdb_ratings;'
cursor.execute(query)
ratings = cursor.fetchall()
cursor.close()
len(ratings)
# make ratings data into df
user_ids = []
names = []
years = []
user_ratings = []
movie_ids = []
for rating in ratings:
user_ids.append(rating[0])
names.append(rating[1])
years.append(rating[2])
user_ratings.append(rating[3])
movie_ids.append(str(rating[4]))
ratings_df = pd.DataFrame({
'user_id': user_ids,
'title': names,
'year': years,
'rating': user_ratings,
'movie_id': movie_ids
})
print(ratings_df.shape)
ratings_df.head()
# get len of all movie_ids in ratings_df
lengths = {}
for movie_id in ratings_df['movie_id'].values:
if len(movie_id) in lengths:
lengths[len(movie_id)] += 1
else:
lengths[len(movie_id)] = 1
print(movie_id)
lengths
ratings_df_merged = pd.merge(ratings_df, movie_df, on='movie_id', how='inner')
print(ratings_df_merged.shape)
ratings_df_merged.head()
| 0.260954 | 0.732173 |
Remember that the labels we are using are: `thumbsup`, `thumbsdown`, `thankyou`, `livelong`
```
import os
CUSTOM_MODEL_NAME = 'my_ssd_mobnet_tuned' # Two models so far: my_ssd_mobnet and my_ssd_mobnet_tuned
PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'
PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'
TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'
LABEL_MAP_NAME = 'label_map.pbtxt'
paths = {
'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),
'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),
'APIMODEL_PATH': os.path.join('Tensorflow','models'),
'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),
'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),
'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),
'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),
'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME),
'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'),
'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'),
'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'),
'PROTOC_PATH':os.path.join('Tensorflow','protoc')
}
files = {
'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),
'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME),
'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)
}
for path in paths.values():
if not os.path.exists(path):
if os.name == 'posix':
!mkdir -p {path}
if os.name == 'nt':
!mkdir {path}
```
# 1. Download Pretrained Models from the TensorFlow Model Zoo and Install TFOD
Let's start by cloning the TFOD API into `'Tensorflow/models/research/object_detection'`
```
if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):
!git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}
```
Let's install the protocol buffer compiler, `protoc`, used to compile *.proto* files
```
!brew install protobuf
!cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install .
VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')
!python {VERIFICATION_SCRIPT}
!pip list
import object_detection
!brew install wget
```
`tar -zxvf`
- `z` means (un)z̲ip.
- `x` means ex̲tract files from the archive.
- `v` means print the filenames v̲erbosely.
- `f` means the following argument is a f̱ilename.
```
if os.name == 'posix':
!wget {PRETRAINED_MODEL_URL}
!mv {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}
!cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}
```
# 2. Create Label Map
```
labels = [{'name':'ThumbsUp', 'id':1},
{'name':'ThumbsDown', 'id':2},
{'name':'ThankYou', 'id':3},
{'name':'LiveLong', 'id':4}]
files['LABELMAP']
with open(files['LABELMAP'], 'w') as f:
for label in labels:
f.write('item { \n')
f.write('\tname:\'{}\'\n'.format(label['name']))
f.write('\tid:{}\n'.format(label['id']))
f.write('}\n')
```
Eventually we get a `label_map.pbtxt` file like this one (using the labels defined above):

    item {
        name:'ThumbsUp'
        id:1
    }
    item {
        name:'ThumbsDown'
        id:2
    }
    item {
        name:'ThankYou'
        id:3
    }
    item {
        name:'LiveLong'
        id:4
    }
# 3. Create TF records
`TFRecord` is a binary format for storing data; it helps speed up training.
```
if not os.path.exists(files['TF_RECORD_SCRIPT']):
!git clone https://github.com/nicknochnack/GenerateTFRecord {paths['SCRIPTS_PATH']}
files['TF_RECORD_SCRIPT']
```
usage: generate_tfrecord.py [-h] [-x XML_DIR] [-l LABELS_PATH] [-o OUTPUT_PATH] [-i IMAGE_DIR] [-c CSV_PATH]
```
!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'train')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'train.record')}
!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'test')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'test.record')}
```
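To sanity-check the generated records, a small sketch that counts the serialized examples (it reuses the `paths` dict defined above):
```
# Sketch: count the serialized examples in the generated train.record.
import tensorflow as tf

record_path = os.path.join(paths['ANNOTATION_PATH'], 'train.record')
num_examples = sum(1 for _ in tf.data.TFRecordDataset(record_path))
print(f"{num_examples} examples in {record_path}")
```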
# 4. Copy Model Config to Training Folder
```
if os.name =='posix':
!cp {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}
if os.name == 'nt':
!copy {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}
```
# 5. Update Config For Transfer Learning
```
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format
config = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
files['PIPELINE_CONFIG']
config
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
pipeline_config
```
Let's change some hyperparameters like the number of classes `num_classes`
```
pipeline_config.model.ssd.num_classes = len(labels)
pipeline_config.train_config.batch_size = 4
pipeline_config.train_config.fine_tune_checkpoint = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'checkpoint', 'ckpt-0')
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
pipeline_config.train_input_reader.label_map_path= files['LABELMAP']
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'train.record')]
pipeline_config.eval_input_reader[0].label_map_path = files['LABELMAP']
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'test.record')]
pipeline_config
config_text = text_format.MessageToString(pipeline_config)
with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "wb") as f:
f.write(config_text)
```
# 6. Train the model
In order to train the model we need:
1. The training script
2. The model directory (where the pipeline config is); it holds all the custom paths and labels
3. The pipeline config path
4. The number of training steps
```
TRAINING_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'model_main_tf2.py')
command = "python {} --model_dir={} --pipeline_config_path={} --num_train_steps=2000"\
.format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'], files['PIPELINE_CONFIG'])
print(command)
!{command}
```
# 7. Evaluate the model
```
command = "python {} --model_dir={} --pipeline_config_path={} --checkpoint_dir={}".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'])
!{command}
```
# 8. Load Train Model From Checkpoint
```
import os
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
from object_detection.utils import config_util
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-3')).expect_partial()
@tf.function
def detect_fn(image):
image, shapes = detection_model.preprocess(image)
prediction_dict = detection_model.predict(image, shapes)
detections = detection_model.postprocess(prediction_dict, shapes)
return detections
```
# Detect from Image
```
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])
IMAGE_PATH = os.path.join(paths['IMAGE_PATH'], 'test', 'thumbsdown.44d93f20-35be-11ec-b8d7-acbc32cb08df.jpg')
img = cv2.imread(IMAGE_PATH)
image_np = np.array(img)
input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
label_id_offset = 1
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],
detections['detection_classes']+label_id_offset,
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=.8,
agnostic_mode=False)
plt.imshow(cv2.cvtColor(image_np_with_detections, cv2.COLOR_BGR2RGB))
plt.show()
```
# Real time detections from webcam
```
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while cap.isOpened():
ret, frame = cap.read()
image_np = np.array(frame)
input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
label_id_offset = 1
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],
detections['detection_classes']+label_id_offset,
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=.8,
agnostic_mode=False)
cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))
if cv2.waitKey(10) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
```
# Freezing the graph
```
FREEZE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'exporter_main_v2.py ')
command = "python {} --input_type=image_tensor --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}".format(FREEZE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['OUTPUT_PATH'])
!{command}
```
# Convert to TFJS
Export to TFJS. It will create a `model.json` file and a number of binary weight shard (`.bin`) files.
```
!pip install tensorflowjs
command = "tensorflowjs_converter --input_format=tf_saved_model --output_node_names='detection_boxes,detection_classes,detection_features,detection_multiclass_scores,detection_scores,num_detections,raw_detection_boxes,raw_detection_scores' --output_format=tfjs_graph_model --signature_name=serving_default {} {}".format(os.path.join(paths['OUTPUT_PATH'], 'saved_model'), paths['TFJS_PATH'])
!{command}
```
# Convert to TFLite
```
TFLITE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'export_tflite_graph_tf2.py ')
command = "python {} --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}".format(TFLITE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['TFLITE_PATH'])
!{command}
```
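The script above exports a TFLite-compatible SavedModel; producing the actual `.tflite` file is typically a separate step with `TFLiteConverter`. A hedged sketch of that follow-up (the `saved_model` subfolder and the `detect.tflite` filename are assumptions):
```
# Sketch: convert the exported SavedModel into a .tflite flatbuffer.
# Assumes the script above wrote a saved_model/ folder under TFLITE_PATH.
saved_model_dir = os.path.join(paths['TFLITE_PATH'], 'saved_model')
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open(os.path.join(paths['TFLITE_PATH'], 'detect.tflite'), 'wb') as f:
    f.write(tflite_model)
```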
|
github_jupyter
|
import os
CUSTOM_MODEL_NAME = 'my_ssd_mobnet_tuned' # Two models so far: my_ssd_mobnet and my_ssd_mobnet_tuned
PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'
PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'
TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'
LABEL_MAP_NAME = 'label_map.pbtxt'
paths = {
'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),
'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),
'APIMODEL_PATH': os.path.join('Tensorflow','models'),
'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),
'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),
'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),
'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),
'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME),
'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'),
'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'),
'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'),
'PROTOC_PATH':os.path.join('Tensorflow','protoc')
}
files = {
'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),
'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME),
'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)
}
for path in paths.values():
if not os.path.exists(path):
if os.name == 'posix':
!mkdir -p {path}
if os.name == 'nt':
!mkdir {path}
if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):
!git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}
!brew install protobuf
!cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install .
VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')
!python {VERIFICATION_SCRIPT}
!pip list
import object_detection
!brew install wget
if os.name == 'posix':
!wget {PRETRAINED_MODEL_URL}
!mv {PRETRAINED_MODEL_NAME+'.tar.gz'} {paths['PRETRAINED_MODEL_PATH']}
!cd {paths['PRETRAINED_MODEL_PATH']} && tar -zxvf {PRETRAINED_MODEL_NAME+'.tar.gz'}
labels = [{'name':'ThumbsUp', 'id':1},
{'name':'ThumbsDown', 'id':2},
{'name':'ThankYou', 'id':3},
{'name':'LiveLong', 'id':4}]
files['LABELMAP']
with open(files['LABELMAP'], 'w') as f:
for label in labels:
f.write('item { \n')
f.write('\tname:\'{}\'\n'.format(label['name']))
f.write('\tid:{}\n'.format(label['id']))
f.write('}\n')
if not os.path.exists(files['TF_RECORD_SCRIPT']):
!git clone https://github.com/nicknochnack/GenerateTFRecord {paths['SCRIPTS_PATH']}
files['TF_RECORD_SCRIPT']
!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'train')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'train.record')}
!python {files['TF_RECORD_SCRIPT']} -x {os.path.join(paths['IMAGE_PATH'], 'test')} -l {files['LABELMAP']} -o {os.path.join(paths['ANNOTATION_PATH'], 'test.record')}
if os.name =='posix':
!cp {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}
if os.name == 'nt':
!copy {os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'pipeline.config')} {os.path.join(paths['CHECKPOINT_PATH'])}
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format
config = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
files['PIPELINE_CONFIG']
config
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
pipeline_config
pipeline_config.model.ssd.num_classes = len(labels)
pipeline_config.train_config.batch_size = 4
pipeline_config.train_config.fine_tune_checkpoint = os.path.join(paths['PRETRAINED_MODEL_PATH'], PRETRAINED_MODEL_NAME, 'checkpoint', 'ckpt-0')
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
pipeline_config.train_input_reader.label_map_path= files['LABELMAP']
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'train.record')]
pipeline_config.eval_input_reader[0].label_map_path = files['LABELMAP']
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [os.path.join(paths['ANNOTATION_PATH'], 'test.record')]
pipeline_config
config_text = text_format.MessageToString(pipeline_config)
with tf.io.gfile.GFile(files['PIPELINE_CONFIG'], "wb") as f:
f.write(config_text)
TRAINING_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'model_main_tf2.py')
command = "python {} --model_dir={} --pipeline_config_path={} --num_train_steps=2000"\
.format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'], files['PIPELINE_CONFIG'])
print(command)
!{command}
command = "python {} --model_dir={} --pipeline_config_path={} --checkpoint_dir={}".format(TRAINING_SCRIPT, paths['CHECKPOINT_PATH'],files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'])
!{command}
import os
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.builders import model_builder
from object_detection.utils import config_util
# Load pipeline config and build a detection model
configs = config_util.get_configs_from_pipeline_file(files['PIPELINE_CONFIG'])
detection_model = model_builder.build(model_config=configs['model'], is_training=False)
# Restore checkpoint
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-3')).expect_partial()
@tf.function
def detect_fn(image):
image, shapes = detection_model.preprocess(image)
prediction_dict = detection_model.predict(image, shapes)
detections = detection_model.postprocess(prediction_dict, shapes)
return detections
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
category_index = label_map_util.create_category_index_from_labelmap(files['LABELMAP'])
IMAGE_PATH = os.path.join(paths['IMAGE_PATH'], 'test', 'thumbsdown.44d93f20-35be-11ec-b8d7-acbc32cb08df.jpg')
img = cv2.imread(IMAGE_PATH)
image_np = np.array(img)
input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
label_id_offset = 1
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],
detections['detection_classes']+label_id_offset,
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=.8,
agnostic_mode=False)
plt.imshow(cv2.cvtColor(image_np_with_detections, cv2.COLOR_BGR2RGB))
plt.show()
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
while cap.isOpened():
ret, frame = cap.read()
image_np = np.array(frame)
input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections = detect_fn(input_tensor)
num_detections = int(detections.pop('num_detections'))
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
label_id_offset = 1
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],
detections['detection_classes']+label_id_offset,
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=5,
min_score_thresh=.8,
agnostic_mode=False)
cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))
if cv2.waitKey(10) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
FREEZE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'exporter_main_v2.py ')
command = "python {} --input_type=image_tensor --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}".format(FREEZE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['OUTPUT_PATH'])
!{command}
!pip install tensorflowjs
command = "tensorflowjs_converter --input_format=tf_saved_model --output_node_names='detection_boxes,detection_classes,detection_features,detection_multiclass_scores,detection_scores,num_detections,raw_detection_boxes,raw_detection_scores' --output_format=tfjs_graph_model --signature_name=serving_default {} {}".format(os.path.join(paths['OUTPUT_PATH'], 'saved_model'), paths['TFJS_PATH'])
!{command}
TFLITE_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'export_tflite_graph_tf2.py ')
command = "python {} --pipeline_config_path={} --trained_checkpoint_dir={} --output_directory={}".format(TFLITE_SCRIPT ,files['PIPELINE_CONFIG'], paths['CHECKPOINT_PATH'], paths['TFLITE_PATH'])
!{command}
| 0.304352 | 0.671093 |
<a href="https://colab.research.google.com/github/WISSAL-MN/deep_learning/blob/main/Premier_Neurone.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
```
# 1. Dataset
```
X, y = make_blobs(n_samples=100, n_features=2, centers=2, random_state=0)
y = y.reshape((y.shape[0], 1))
print('dimensions de X:', X.shape)
print('dimensions de y:', y.shape)
plt.scatter(X[:,0], X[:, 1], c=y, cmap='summer')
plt.show()
```
# 2. Model functions
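A quick sketch of the math the functions below implement, using the same symbols as the code ($W$, $b$, $A$, and $m$ examples):
$$Z = XW + b, \qquad A = \sigma(Z) = \frac{1}{1 + e^{-Z}}$$
$$\mathcal{L}(A, y) = -\frac{1}{m}\sum_{i=1}^{m}\left[y_i \log A_i + (1 - y_i)\log(1 - A_i)\right], \qquad \frac{\partial \mathcal{L}}{\partial W} = \frac{1}{m} X^\top (A - y), \qquad \frac{\partial \mathcal{L}}{\partial b} = \frac{1}{m}\sum_{i=1}^{m}(A_i - y_i)$$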
```
def initialisation(X):
W = np.random.randn(X.shape[1], 1)
b = np.random.randn(1)
return (W, b)
def model(X, W, b):
Z = X.dot(W) + b
A = 1 / (1 + np.exp(-Z))
return A
def log_loss(A, y):
return 1 / len(y) * np.sum(-y * np.log(A) - (1 - y) * np.log(1 - A))
def gradients(A, X, y):
dW = 1 / len(y) * np.dot(X.T, A - y)
db = 1 / len(y) * np.sum(A - y)
return (dW, db)
def update(dW, db, W, b, learning_rate):
W = W - learning_rate * dW
b = b - learning_rate * db
return (W, b)
def predict(X, W, b):
A = model(X, W, b)
# print(A)
return A >= 0.5
from sklearn.metrics import accuracy_score
def artificial_neuron(X, y, learning_rate = 0.1, n_iter = 100):
# initialisation W, b
W, b = initialisation(X)
Loss = []
for i in range(n_iter):
A = model(X, W, b)
Loss.append(log_loss(A, y))
dW, db = gradients(A, X, y)
W, b = update(dW, db, W, b, learning_rate)
y_pred = predict(X, W, b)
print(accuracy_score(y, y_pred))
plt.plot(Loss)
plt.show()
return (W, b)
W, b = artificial_neuron(X, y)
```
# 3. Decision boundary
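The plotting cell below solves the boundary equation for $x_2$; with the same $W$ and $b$ as above it is simply:
$$w_1 x_1 + w_2 x_2 + b = 0 \quad\Longrightarrow\quad x_2 = \frac{-\,w_1 x_1 - b}{w_2}$$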
```
fig, ax = plt.subplots(figsize=(9, 6))
ax.scatter(X[:,0], X[:, 1], c=y, cmap='summer')
x1 = np.linspace(-1, 4, 100)
x2 = ( - W[0] * x1 - b) / W[1]
ax.plot(x1, x2, c='orange', lw=3)
```
# 4. 3D visualizations
```
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter3d(
x=X[:, 0].flatten(),
y=X[:, 1].flatten(),
z=y.flatten(),
mode='markers',
marker=dict(
size=5,
color=y.flatten(),
colorscale='YlGn',
opacity=0.8,
reversescale=True
)
)])
fig.update_layout(template= "plotly_dark", margin=dict(l=0, r=0, b=0, t=0))
fig.layout.scene.camera.projection.type = "orthographic"
fig.show()
X0 = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
X1 = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
xx0, xx1 = np.meshgrid(X0, X1)
Z = W[0] * xx0 + W[1] * xx1 + b
A = 1 / (1 + np.exp(-Z))
fig = (go.Figure(data=[go.Surface(z=A, x=xx0, y=xx1, colorscale='YlGn', opacity = 0.7, reversescale=True)]))
fig.add_scatter3d(x=X[:, 0].flatten(), y=X[:, 1].flatten(), z=y.flatten(), mode='markers', marker=dict(size=5, color=y.flatten(), colorscale='YlGn', opacity = 0.9, reversescale=True))
fig.update_layout(template= "plotly_dark", margin=dict(l=0, r=0, b=0, t=0))
fig.layout.scene.camera.projection.type = "orthographic"
fig.show()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=100, n_features=2, centers=2, random_state=0)
y = y.reshape((y.shape[0], 1))
print('dimensions de X:', X.shape)
print('dimensions de y:', y.shape)
plt.scatter(X[:,0], X[:, 1], c=y, cmap='summer')
plt.show()
def initialisation(X):
W = np.random.randn(X.shape[1], 1)
b = np.random.randn(1)
return (W, b)
def model(X, W, b):
Z = X.dot(W) + b
A = 1 / (1 + np.exp(-Z))
return A
def log_loss(A, y):
return 1 / len(y) * np.sum(-y * np.log(A) - (1 - y) * np.log(1 - A))
def gradients(A, X, y):
dW = 1 / len(y) * np.dot(X.T, A - y)
db = 1 / len(y) * np.sum(A - y)
return (dW, db)
def update(dW, db, W, b, learning_rate):
W = W - learning_rate * dW
b = b - learning_rate * db
return (W, b)
def predict(X, W, b):
A = model(X, W, b)
# print(A)
return A >= 0.5
from sklearn.metrics import accuracy_score
def artificial_neuron(X, y, learning_rate = 0.1, n_iter = 100):
# initialisation W, b
W, b = initialisation(X)
Loss = []
for i in range(n_iter):
A = model(X, W, b)
Loss.append(log_loss(A, y))
dW, db = gradients(A, X, y)
W, b = update(dW, db, W, b, learning_rate)
y_pred = predict(X, W, b)
print(accuracy_score(y, y_pred))
plt.plot(Loss)
plt.show()
return (W, b)
W, b = artificial_neuron(X, y)
fig, ax = plt.subplots(figsize=(9, 6))
ax.scatter(X[:,0], X[:, 1], c=y, cmap='summer')
x1 = np.linspace(-1, 4, 100)
x2 = ( - W[0] * x1 - b) / W[1]
ax.plot(x1, x2, c='orange', lw=3)
import plotly.graph_objects as go
fig = go.Figure(data=[go.Scatter3d(
x=X[:, 0].flatten(),
y=X[:, 1].flatten(),
z=y.flatten(),
mode='markers',
marker=dict(
size=5,
color=y.flatten(),
colorscale='YlGn',
opacity=0.8,
reversescale=True
)
)])
fig.update_layout(template= "plotly_dark", margin=dict(l=0, r=0, b=0, t=0))
fig.layout.scene.camera.projection.type = "orthographic"
fig.show()
X0 = np.linspace(X[:, 0].min(), X[:, 0].max(), 100)
X1 = np.linspace(X[:, 1].min(), X[:, 1].max(), 100)
xx0, xx1 = np.meshgrid(X0, X1)
Z = W[0] * xx0 + W[1] * xx1 + b
A = 1 / (1 + np.exp(-Z))
fig = (go.Figure(data=[go.Surface(z=A, x=xx0, y=xx1, colorscale='YlGn', opacity = 0.7, reversescale=True)]))
fig.add_scatter3d(x=X[:, 0].flatten(), y=X[:, 1].flatten(), z=y.flatten(), mode='markers', marker=dict(size=5, color=y.flatten(), colorscale='YlGn', opacity = 0.9, reversescale=True))
fig.update_layout(template= "plotly_dark", margin=dict(l=0, r=0, b=0, t=0))
fig.layout.scene.camera.projection.type = "orthographic"
fig.show()
| 0.569853 | 0.979842 |
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
winning_model_df = pd.read_pickle('../code/app/subcortex-app/data/winning_models/qmri_age_models.pkl')
winning_model_df.head()
age_range = np.arange(19, 76)
predict_df_0 = pd.DataFrame({'Age': age_range, 'Age2': age_range**2, 'sex': 0})
f, ax = plt.subplots(2,4, figsize=(10,3), sharex=True, gridspec_kw={'hspace':.15, 'wspace':.1, 'width_ratios':[.3,.07,.3,.3]})
aucs = []
for i in range(2):
if i == 0:
roi = 'ic'
statistic = 'median'
measure = 'myelin'
elif i == 1:
roi = 'AMG'
statistic = 'median'
measure = 'iron'
result = winning_model_df.loc[(winning_model_df.ROI==roi) & (winning_model_df.qMRI==measure) & (winning_model_df.Statistic==statistic), 'result'].values[0]
params = result.params
print(params)
y = result.predict(exog=predict_df_0)
ax[i,0].plot(age_range,y)
ax[i,2].plot(age_range[1:], np.diff(y))
ax[i,2].axhline(color='k', linewidth=.5)
ax[i,2].set_ylim(-0.08, 0.08)
ax[i,3].plot(age_range[1:], np.abs(np.diff(y)))
ax[i,3].axhline(color='k', linewidth=.5)
ax[i,3].set_ylim(ax[i,2].get_ylim())
ax[i,3].set_yticklabels([])
ax[i,3].fill_between(age_range[1:], np.abs(np.diff(y)), color='lightgrey')
aucs.append(np.sum(np.abs(np.diff(y))))
ax[0,0].text(20, 15.5, s='$f(x) = 15.811+$\n $0.0055x-0.0008x^2$')
ax[0,2].text(25, 0.05, s='$f\'(x) = 0.0055-0.0016x$')
ax[0,3].text(20, -0.06, s='$\int_{{19}}^{{75}} |f\'(x)|dx = {:.3f}$'.format(aucs[0]))
ax[0,0].set_ylabel('Myelin (ppm)')
ax[1,0].text(20, 2.95, s='$f(x) = 1.557+0.021x$')
ax[1,2].text(20, -0.06, s='$f\'(x) = 0.021$')
ax[1,3].text(20, -0.06, s='$\int_{{19}}^{{75}} |f\'(x)|dx = {:.3f}$'.format(aucs[1]))
ax[1,0].set_ylabel('Iron (ppm)')
ax[0,0].set_title('Best fitting model')
ax[0,2].set_title('Derivative')
ax[0,3].set_title('Absolute derivative')
ax[0,2].set_ylabel('ppm/year')
ax[1,2].set_ylabel('ppm/year')
ax[0,1].axis('off')
ax[1,1].axis('off')
ax[1,2].set_xlabel('Age')
np.sum(np.abs(np.diff(y)))
f.tight_layout()
f.align_ylabels(ax[:, 0])
f.savefig('../figures/R1/derivative_illustration.pdf', bbox_inches='tight')
```
Procedure for estimating the total amount of change across the adult lifespan. The left column shows two example models: one inverted U-shape (median myelin change in the internal capsule) and one linear increase (median iron change in the amygdala). Formally, change across ages is given by the first derivative (middle column). To collapse over the direction of change (increase or decrease), we took the absolute value of the derivative (right column). The sum of this absolute derivative (illustrated by the gray area under the curve) represents the total amount of change in a region. As a final step (not illustrated), the sum of the absolute derivative is divided by the model prediction at age 19, which represents the total amount of change relative to the baseline value.
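The final normalization step described above is not in the plotting code; a minimal sketch of it, reusing `result` and `predict_df_0` from the loop (so it applies to whichever region was processed last):
```
# Sketch of the final (non-illustrated) step: total change relative to the
# prediction at age 19, reusing `result` and `predict_df_0` from the loop above.
y_hat = np.asarray(result.predict(exog=predict_df_0))
total_change = np.sum(np.abs(np.diff(y_hat)))   # sum of the absolute derivative
relative_change = total_change / y_hat[0]       # divide by the prediction at age 19
print(f"relative change across the adult lifespan: {relative_change:.3f}")
```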
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
winning_model_df = pd.read_pickle('../code/app/subcortex-app/data/winning_models/qmri_age_models.pkl')
winning_model_df.head()
age_range = np.arange(19, 76)
predict_df_0 = pd.DataFrame({'Age': age_range, 'Age2': age_range**2, 'sex': 0})
f, ax = plt.subplots(2,4, figsize=(10,3), sharex=True, gridspec_kw={'hspace':.15, 'wspace':.1, 'width_ratios':[.3,.07,.3,.3]})
aucs = []
for i in range(2):
if i == 0:
roi = 'ic'
statistic = 'median'
measure = 'myelin'
elif i == 1:
roi = 'AMG'
statistic = 'median'
measure = 'iron'
result = winning_model_df.loc[(winning_model_df.ROI==roi) & (winning_model_df.qMRI==measure) & (winning_model_df.Statistic==statistic), 'result'].values[0]
params = result.params
print(params)
y = result.predict(exog=predict_df_0)
ax[i,0].plot(age_range,y)
ax[i,2].plot(age_range[1:], np.diff(y))
ax[i,2].axhline(color='k', linewidth=.5)
ax[i,2].set_ylim(-0.08, 0.08)
ax[i,3].plot(age_range[1:], np.abs(np.diff(y)))
ax[i,3].axhline(color='k', linewidth=.5)
ax[i,3].set_ylim(ax[i,2].get_ylim())
ax[i,3].set_yticklabels([])
ax[i,3].fill_between(age_range[1:], np.abs(np.diff(y)), color='lightgrey')
aucs.append(np.sum(np.abs(np.diff(y))))
ax[0,0].text(20, 15.5, s='$f(x) = 15.811+$\n $0.0055x-0.0008x^2$')
ax[0,2].text(25, 0.05, s='$f\'(x) = 0.0055-0.0016x$')
ax[0,3].text(20, -0.06, s='$\int_{{19}}^{{75}} |f\'(x)|dx = {:.3f}$'.format(aucs[0]))
ax[0,0].set_ylabel('Myelin (ppm)')
ax[1,0].text(20, 2.95, s='$f(x) = 1.557+0.021x$')
ax[1,2].text(20, -0.06, s='$f\'(x) = 0.021$')
ax[1,3].text(20, -0.06, s='$\int_{{19}}^{{75}} |f\'(x)|dx = {:.3f}$'.format(aucs[1]))
ax[1,0].set_ylabel('Iron (ppm)')
ax[0,0].set_title('Best fitting model')
ax[0,2].set_title('Derivative')
ax[0,3].set_title('Absolute derivative')
ax[0,2].set_ylabel('ppm/year')
ax[1,2].set_ylabel('ppm/year')
ax[0,1].axis('off')
ax[1,1].axis('off')
ax[1,2].set_xlabel('Age')
np.sum(np.abs(np.diff(y)))
f.tight_layout()
f.align_ylabels(ax[:, 0])
f.savefig('../figures/R1/derivative_illustration.pdf', bbox_inches='tight')
| 0.319334 | 0.546012 |
```
!pip install rdflib
!pip install folium
!pip install pyproj
storage = "https://raw.githubusercontent.com/opencitydata/guia-rdf-datosgob/main/rdf-explotacion/terrazas-madrid.nt" #poner el enlace a tus datos en github (raw files)
from rdflib import Graph, Namespace, Literal
from rdflib.plugins.sparql import prepareQuery
import folium
from pyproj import Transformer
g = Graph()
g.parse(storage, format="ntriples") #quizá esto tarde un poco si el archivo es muy grande
```
# Query 1: List of terraces and their annual Monday-to-Thursday opening hours
```
from rdflib import XSD
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
q1 = prepareQuery('''
SELECT
?terraza ?horario
WHERE {
?terraza rdf:type escom:Terraza;
<http://schema.org/openingHours> ?horario .
FILTER(regex(?horario, "Anual Lun-Juev.*", "i" ))
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
print(r.terraza, r.horario)
```
# Query 2: List of terraces with more than 15 authorized tables
```
from rdflib import XSD
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
q1 = prepareQuery('''
SELECT
?terraza ?mesas
WHERE {
?terraza rdf:type escom:Terraza;
escom:numeroMesasAutorizadas ?mesas .
FILTER(?mesas > "15"^^<http://www.w3.org/2001/XMLSchema#integer>)
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
print(r.terraza, r.mesas)
```
# Query 3: List of terraces operating during the annual period
```
from rdflib import XSD
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
q1 = prepareQuery('''
SELECT
?terraza
WHERE {
?terraza rdf:type escom:Terraza;
escom:periodoFuncionamiento <http://vocab.linkeddata.es/datosabiertos/kos/comercio/periodo-funcionamiento/anual> .
}
''',
initNs = { "escom": ESCOM}
)
for r in g.query(q1):
print(r.terraza)
```
# Real-world example: plotting geographic points from RDF
```
ESCOM = Namespace("http://vocab.ciudadesabiertas.es/def/comercio/tejido-comercial/")
# Preparamos la consulta, dame las terrazas, su horario anual de lunes a jueves y la latitud y longitud de su LocalComercial asociado
q1 = prepareQuery('''
SELECT
?horario ?lat ?lon
WHERE {
?terraza rdf:type escom:Terraza .
?terraza <http://schema.org/openingHours> ?horario .
?terraza escom:perteneceA ?local .
?local rdf:type escom:LocalComercial .
?local <http://www.opengis.net/ont/geosparql#hasGeometry> ?point .
?point rdf:type <http://www.opengis.net/ont/sf#Point> .
?point <https://datos.ign.es/def/geo_core#xETRS89> ?lat .
?point <https://datos.ign.es/def/geo_core#yETRS89> ?lon .
FILTER(regex(?horario, "Anual Lun-Juev.*", "i" ))
} LIMIT 10
''',
initNs = { "escom": ESCOM}
)
# inspeccionamos los datos que nos devuelve la consulta
for r in g.query(q1):
print(r.lat, r.lon, r.horario)
# debemos transformar el formato de los datos de lon de UTM a WGS 84
transformer = Transformer.from_crs('epsg:25830','epsg:4326')
mapa = folium.Map(location=[40.4167, -3.70325])
for r in g.query(q1):
x,y = transformer.transform(float((r.lat).replace(",",".")),float((r.lon).replace(",",".")))
horario = (r.horario).replace("Anual Lun-Juev ","")
folium.Marker([x,y], popup=horario, tooltip=horario).add_to(mapa)
mapa
```
# $\color{orange}{\text{Multiway Split Sparse Decision Tree}}$
<span style='color:orange'>
* <a href="#Introduction">Introduction</a> <br>
* <a href="#Bit-Maths">Bit Maths</a> <br>
* <a href="#Design-and-analysis-of-Algorithms">Design and analysis of Algorithms</a> <br>
* <a href="#Implementation">Implementation</a> <br>
- <a href="#Refactoring">Refactoring</a> <br>
- <a href="#Unit-Tests">Unit Tests</a> <br>
- <a href="#Static-Analysis">Static Analysis</a> <br>
- <a href="#Dynamic-Code-Analysis">Dynamic Code Analysis</a> <br>
- <a href="#Benchmarking">Benchmarking</a> <br>
* <a href="#Embedding">Embedding MGOSDT in Python</a> <br>
* <a href="#Contribution">Contribution</a> <br>
* <a href="#Future">Future work</a> <br>
</span>
## Introduction
<img src="tree0.png" alt="drawing" width="550"/>
Decision trees have been popular since the early days of machine learning due to their interpretability. Historically, the way we construct them is simple: we start with some root and keep branching out until we hit some kind of stopping condition, at which point we can optionally prune some of the branches to avoid overfitting. The problem with this very plain algorithm (top down, prune afterwards) is the lack of optimality: if we choose the wrong split at the very beginning, there is no way to undo it. There have been numerous attempts to fix this problem via mathematical optimization solvers (or neural networks). However, in order to fully optimize a decision tree, we need to search a space that is hard in both theory and practice: it explodes combinatorially in the number of subtrees one can consider.
The goal of my work is to produce an optimal sparse, **multiway-split** decision tree. We follow a specialized algorithm that combines *dynamic programming* and *branch and bound* to optimize a generalized objective:
$$ \min_{\text{tree}} \hat{L}(\text{tree}, \{(x_i, y_i)\}_i) = l(\text{tree}, \{(x_i, y_i)\}_i) + \lambda(\#\text{leaves in tree}).$$
We minimize the misclassification error plus a sparsity penalty over all possible n-ary trees. Here $\lambda$ is the trade-off parameter that governs the balance between predictive performance and sparsity (in brief, it regularizes the number of leaves).
We remark that this algorithm solves the (NP-hard) problem of optimality by leveraging computational caching; there is no greedy splitting and pruning as in C4.5 and CART. When we can solve the problem to optimality, we get sparse, accurate trees. Our approach relies on several important insights:
1. **Analytical Bounds**: the collection of bounds shows that some partial trees can never be extended to form an optimal tree, thus reducing the search space without sacrificing the optimality of our algorithm.
2. Dynamic Programming and **dependency graph** (see pptx)
* Start with some dataset and apply some naive labels.
* Split it into subsets using each feature.
* Keep splitting until higher accuracy is attained.
* Consolidate any duplication that is found (the solution for one duplicated instance can be reused as the solution for another instance).
* The DP formulation creates a dependency graph between sets and subsets. Each set is responsible for finding optimal features to sub-divide itself into additional subsets; then each subset decides the best feature to split on. Once enough subsets are decided, we collapse the trees, and the optimal tree emerges as a directed acyclic graph of best features. (A minimal sketch of this memoized recursion is given just after this list.)
3. Tree representation by its leaves (store bounds and intermediate results within each leaf) 
4. **Permutation map**: discover identical trees that have already been evaluated.
5. **Leaf-based representation**: we store a bit vector indicating which data points satisfy the feature conditions described by each leaf (**bit masking**; see `bitmask.cpp`, `dataset.cpp` and `encoder.cpp`)
```cpp
#include <iostream>
#include <cassert>
#include <vector>
// The purpose of the bit function is to tell us whether a certain bit of an int is set to 1.
// It does so by
// 1 building a mask with only the desired bit set (by shifting 1 left by index)
// 2 clearing every other bit of the value (by ANDing with the mask)
// 3 returning true if the result is non-zero (by converting to bool)
static bool bit(int index, int value)
{
    return value & (1 << index); // check whether the index-th bit is set
}

std::vector<bool> makeBitVector(size_t rain, bool construction, bool rush_hour, bool friday)
{
    std::vector<bool> result(5, false);
    result[0] = bit(0, rain); // result[0] is the first bit of rain (bit 0)
    result[1] = bit(1, rain); // result[1] is the 2nd bit of rain (bit 1)
    result[2] = construction;
    result[3] = rush_hour;
    result[4] = friday;
    return result;
}
```
[bitmasking via vector of bool](https://godbolt.org/z/qYnxE4rzx)
[bitmasking via bitset](https://godbolt.org/z/WPbf7GKhc)
[bitmasking multiple bits](https://godbolt.org/z/456ovEcx8)
6. Caching of intermediate results makes our computations very fast (solved subproblems are memoized, so we never pay for the same computation twice).
Question: does each set correspond to a task, or do the sets and subsets have nothing to do with the arrangement of work?
In my [code](https://gitlab.com/leannejdong/mgosdt/-/blob/main/src/optimizer/extraction/models.hpp)
the function at line 25 creates a set of optimal trees. It is essentially the dependency graph in the context of concurrent programming. Indeed, tree and subtree, problem and sub-problem, set and subset in mgosdt are the `task`s, which are the problems identified by the bit vectors (from the bitmask class).
7. Incremental computation
The bounds and bit vectors stored in each leaf also let us use incremental computation to evaluate the children of the leaves, should we decide to split further.
8. Multiway split
* Gives a more natural way to handle multi-valued categorical features than binary splits; even with only 3 categories, a cascade of 2-way splits is less interpretable.
* Gives a more interpretable decision-tree algorithm that can handle a larger variety of datasets.
* An m-way tree also works much better on GPUs than a binary tree. The shallowness usually helps in the sense that we can compress it and save memory when storing information. For instance, a B-tree is a kind of balanced search tree optimized for external memory; because of its shallow structure the height of a B-tree is minimized, meaning that we do not have to access all the items every time we call the method recursively.
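To make the decomposition in insight 2 concrete, here is a minimal, illustrative Python sketch (not the actual MGOSDT implementation) of the memoized recursion: a subproblem is identified purely by the subset of rows it contains, every feature split creates two child subproblems, and the cache consolidates duplicated subsets so each one is solved only once. The toy dataset, labels and penalty value are made up for illustration.
```python
from functools import lru_cache

# Toy binarized dataset: each row is a tuple of binary features; labels live in y.
X = [(0, 0), (0, 1), (1, 0), (1, 1)]
y = [0, 1, 1, 1]
LAMBDA = 0.1  # per-leaf penalty (the lambda in the objective above)

@lru_cache(maxsize=None)              # consolidation: identical subsets are solved only once
def solve(rows: frozenset) -> float:
    """Optimal penalized risk for the subproblem made of the rows in `rows`."""
    labels = [y[i] for i in rows]
    # Option 1: stop and make this subset a leaf (minority-label errors + one leaf penalty).
    best = min(labels.count(0), labels.count(1)) / len(y) + LAMBDA
    # Option 2: split on each feature and solve the two child subproblems recursively.
    for j in range(len(X[0])):
        left = frozenset(i for i in rows if X[i][j] == 0)
        right = rows - left
        if left and right:            # only genuine splits create child subproblems
            best = min(best, solve(left) + solve(right))
    return best

print(solve(frozenset(range(len(X)))))  # optimal objective value for the whole dataset
```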
### Binary search tree vs Binary decision tree
A **binary search tree** is an efficient data structure for storing information which you’d like to look up later. For example, you can store n integers and look one up in log n time. At each node in the binary search tree you are simply asking whether the number you’re looking for is higher or lower (hence binary), until eventually you find what you’re looking for (think binary search).
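For concreteness, a minimal sketch of such a lookup (illustrative only, unrelated to the MGOSDT code):
```python
# A minimal binary search tree lookup: at each node we ask one "higher or lower?"
# question, so a balanced tree of n keys is searched in O(log n) comparisons.
class Node:
    def __init__(self, key, left=None, right=None):
        self.key, self.left, self.right = key, left, right

def contains(node, key):
    if node is None:
        return False
    if key == node.key:
        return True
    # One binary question per level: go left for smaller keys, right for larger.
    return contains(node.left, key) if key < node.key else contains(node.right, key)

# A small balanced tree holding {1, 3, 5, 7, 9}.
root = Node(5, Node(3, Node(1), None), Node(7, None, Node(9)))
print(contains(root, 7), contains(root, 4))  # True False
```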
A **binary decision tree**, at least in the context of machine learning, is a function that maps an input space of data to an output space of classes. For example, say you want to classify whether a product will sell out on Black Friday or not. The input space is all the information about the product, such as historical sales, product type, amount discounted, number in stock, etc. The output space has two classes: will sell out, or won't sell out.
Each node in the binary decision tree asks a binary question about the data, e.g. does the product have a high discount? does it usually sell well? Based on the answer to each question you take the left or right branch to the next node, following it down to the bottom (a leaf node), where you find your final answer about whether it will sell out or not.
More generally, you can think of a binary decision tree as a decision making tool. It asks you a series of questions and gives you a decision based on your answers.
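As a toy illustration, such a tree is just a nest of binary questions; the questions and thresholds below are invented purely for the example, not taken from any real model:
```python
# A toy binary decision tree for the "will it sell out on Black Friday?" example.
def will_sell_out(discount_pct: float, avg_weekly_sales: int, stock: int) -> bool:
    if discount_pct >= 30:                 # root question: is the discount high?
        if avg_weekly_sales >= 100:        # next question: does it usually sell well?
            return True                    # leaf: will sell out
        return stock < 50                  # leaf decision based on remaining stock
    return avg_weekly_sales >= 500         # low discount: only very popular items sell out

print(will_sell_out(discount_pct=40, avg_weekly_sales=250, stock=500))  # True
```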
### Examples of multiway-split trees
B* trees, suffix trees
### Represent each subproblem by bit vector
```cpp
// a program that converts integers to their bit representation
#include <iostream>
#include <bitset>
int main(){
for(unsigned i; std::cin>>i;)
std::cout << std::dec << i << "=="
<< std::hex << "0x" << i << "=="
<< std::bitset<8*sizeof(unsigned)>{i}<<"\n";// #bits in an unsigned int
// unsigned types work better with bit operations since we typically don't care about
// negative numbers when dealing with bitwise operations.
// Using an unsigned type gives us one more bit to work with
}
```
## Bit Maths
Information is stored in a computer as binary numbers.
**Bitmasks** are a very useful way to compress multiple boolean flags into a single variable. They can reduce memory usage, and operations on bits are basically as fast as they can get. In practice, any time you want to have multiple flags describing something in your application, a bitmask could be the right tool for the job.
Mask in Bitmask means hiding something, and a Bitmask is nothing but a binary number that represents something. Take an example: consider the set $A = \{1, 2, 3, 4, 5\}$. How do we represent the subset $\{2, 4\}$ using a bitmask of length 5? (**Answer**: the bitmask 01010 represents the subset $\{2, 4\}$.) The basic operations on a bitmask are:
* Set the $i^{th}$ bit using bitwise OR: $b \,|\, (1<< i)$. Take $i = 0$.
* Unset (clear) the $i^{th}$ bit using AND with the complement: $b \,\&\, {\sim}(1<< i)$. Take $i = 1$.
* Check (test) the $i^{th}$ bit using bitwise AND: $b \,\&\, (1<< i)$. Take $i = 3$. Then
$$(1<<i) = 01000$$
$$01010\& 01000 = 01000$$
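The same three operations, shown concretely in Python on the running example (Python is used here just for brevity):
```python
# The three bitmask operations above, applied to b = 01010 (the subset {2, 4}).
b = 0b01010

set_bit0   = b | (1 << 0)        # 0b01011 -> element 1 added
clear_bit1 = b & ~(1 << 1)       # 0b01000 -> element 2 removed
has_bit3   = bool(b & (1 << 3))  # True    -> element 4 is present

print(format(set_bit0, '05b'), format(clear_bit1, '05b'), has_bit3)
# 01011 01000 True
```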
<font color = orange>Exercise</font>: Given a set, count how many subsets have sum of elements greater than or equal to a given value. [subset sum](https://godbolt.org/z/1YdcrTdoK)
We start by creating a class called Bitmask. This class will handle all of our bit manipulation.
```cpp
#include <cstdint>

class Bitmask
{
public:
Bitmask();
// Overwrites this bitmask.
void SetMask(Bitmask& other);
// Returns binary representation of bitmask.
uint32_t GetMask() const;
// Returns true if bit at pos = 1, else false.
bool GetBit(int pos) const;
// Sets bit at specified pos to 1 or 0 (true or false).
void SetBit(int pos, bool on);
// Sets bit at pos to 1.
void SetBit(int pos);
// Sets bit at pos to 0.
void ClearBit(int pos);
// Sets all bits to 0.
void Clear();
private:
uint32_t bits; // 1.
};
```
```cpp
Bitmask::Bitmask() : bits(0) { }
void Bitmask::SetMask(Bitmask& other)
{
bits = other.GetMask();
}
uint32_t Bitmask::GetMask() const
{
return bits;
}
bool Bitmask::GetBit(int pos) const
{
return (bits & (1 << pos)) != 0; // 1
}
// A simple helper method that calls set or clear bit
void Bitmask::SetBit(int pos, bool on)
{
if(on)
{
SetBit(pos);
}
else
{
ClearBit(pos);
}
}
void Bitmask::SetBit(int pos)
{
bits = bits | 1 << pos; // 2
}
void Bitmask::ClearBit(int pos)
{
bits = bits & ~(1 << pos); // 3
}
void Bitmask::Clear()
{
bits = 0;
}
```
In MGOSDT, we define a function module as a collection of functions.
* This declaration acts as both a function module and a container class.
* The static class methods implement a function module providing operations on arrays of type bitblock, which can be allocated on the stack.
* The non-static class methods implement a heap-allocated equivalent, which supports the same methods. @note: many of the binary operations assume that the operands have the same length
[bitmask.hpp](https://gitlab.com/leannejdong/mgosdt/-/blob/dev1/src/bitmask.hpp)
[bitmask.cpp](https://gitlab.com/leannejdong/mgosdt/-/blob/dev1/src/bitmask.cpp)
## Design and analysis of Algorithms
### The DPB algorithm
* Operates on weighted, additive, non-negative loss functions.
$$ \min_{\text{tree}} \hat{L}(\text{tree}, \{(x_i, y_i)\}_i) = l(\text{tree}, \{(x_i, y_i)\}_i) + \lambda(\#\text{leaves in tree}).$$
* **DP** allows us to decompose a problem into smaller child problems that can be solved recursively through a function call.
* The **parallelism** allows us to solve the sub problems in parallel by delegating work to a separate thread.
* The **branch and bound** algorithm lets us prune the search space. In the case of mgosdt, the analytical bounds allow us to eliminate parts of the search space.
How does BB help? If we know the optimal cost is less than X, and we know the current branch of our search space costs more than X, then we can disregard this branch, since we know it won't lead to the optimal cost.
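To illustrate the idea, here is a generic best-first branch-and-bound skeleton in Python (a sketch, not the MGOSDT code); the `lower_bound`, `upper_bound` and `children` callbacks stand in for the problem-specific pieces, which in GOSDT would come from the analytical bounds and the feature splits.
```python
import heapq

def branch_and_bound(root, lower_bound, upper_bound, children):
    """Generic best-first branch and bound (minimization).

    lower_bound(p) underestimates the best cost reachable from subproblem p,
    upper_bound(p) is the cost of some complete solution built from p,
    children(p) yields the subproblems p splits into (empty if p is a leaf).
    """
    best = upper_bound(root)                    # incumbent: best complete solution so far
    frontier = [(lower_bound(root), 0, root)]   # priority queue keyed by lower bound
    counter = 1                                 # tie-breaker so heapq never compares subproblems
    while frontier:
        lb, _, p = heapq.heappop(frontier)
        if lb >= best:                          # bound: this branch cannot beat the incumbent
            continue
        best = min(best, upper_bound(p))        # maybe improve the incumbent
        for child in children(p):               # branch: enqueue the child subproblems
            clb = lower_bound(child)
            if clb < best:
                heapq.heappush(frontier, (clb, counter, child))
                counter += 1
    return best
```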
### The Primary Data Structures
GOSDT maintains two primary data structures:
* a concurrent *priority queue* used to schedule the problems to solve. Recall that a priority queue maintains an ordering of its items based on their priorities: a normal queue has a FIFO policy, whereas a priority queue sorts its items. The C++ STL implementation `std::priority_queue` is not thread-safe; *concurrent* usage means that the results of methods such as size, empty and swap can change at any time because other threads may push or pop. The `tbb::concurrent_priority_queue` supports `std::priority_queue`'s size, empty and swap methods and is **thread-safe**.
* a dependency graph used to store problems and their dependency relationships. The dependency relationship $dep(p_{\pi}, p_c)$ is defined between problems $p_{\pi}$ and $p_c$ only if the solution of $p_{\pi}$ depends on the solution of $p_c$. Each $p_c$ is further specified as $p^j_l$ or $p^j_r$, indicating that it is the left or right branch produced by splitting on feature $j$.
### Algorithms
* Algorithm 1

* Algorithm 2 get_lower_bound $(s, Z, z^-, z^+) \rightarrow lb$
* Algorithm 3 get_upper_bound $(s, Z, z^-, z^+) \rightarrow ub$
* Algorithm 4 fails_bound $(p) \rightarrow v$
* Algorithm 5 $\text{split}(s, j, Z) \rightarrow s_l, s_r$

* Algorithm 6 an extraction algorithm that is used to construct the optimal tree from the dependency graph once the main GOSDT algorithm completes.

## Dynamic programming formulation
* Each dataset presents a classification problem.
* When we alter the data by a condition, the resulting subset presents a new problem.
* Each filtering creates a new subproblem.
* If two sets of conditions result in the same subset, then the solution of that subset can be used for both sets of conditions.
This allows us to reduce the amount of computations to reach the solutions.
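A tiny illustration of the last point (the rows and conditions are made up): the cache key depends only on which rows survive the filters, not on the order in which the conditions were applied.
```python
# Two different orders of applying the same filter conditions produce the same
# subset of rows, so they map to the same key in the cache and are solved once.
rows = [
    {"rain": 1, "rush_hour": 1},
    {"rain": 0, "rush_hour": 1},
    {"rain": 1, "rush_hour": 0},
]

def survivors(conditions):
    # The subproblem is identified only by which rows survive the filters.
    return frozenset(i for i, r in enumerate(rows)
                     if all(r[k] == v for k, v in conditions))

a = survivors([("rain", 1), ("rush_hour", 1)])
b = survivors([("rush_hour", 1), ("rain", 1)])
print(a == b)  # True -> one cache entry serves both condition orderings
```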
## Contribution
Our work presents
* A new algorithm that converts a binary decision tree into an n-ary decision tree.
* Efficient memory management leads to a speed up! Avoiding unnecessary memory allocation generally speeds up a program.
* Performance optimization.
- Completed profiling that allows us to detect which parts of mgosdt are slow.
- We came up with ideas about what speeds up mgosdt.
- We experimented and performed benchmarking, which validated that things actually work.
## Some common questions on multiway vs binary decision trees
Given the tree's structure, any one of the $N$ attributes can be encoded in $\log_2(N)$ bits.
* Do n-ary DTs lead to a run-time improvement?
No. There is no difference in terms of complexity (both time and space). Every node may have more than two children, but the running time is still $O(\log N)$, because
$$\log_b N = \frac{\log_a N}{\log_a b}.$$
Here $\log_a b$ is just a constant, so it does not matter: we can change the base of the logarithm and the running-time complexity of the algorithm stays the same.
$$ O(c\log N) = c\, O(\log N) = O(\log N) .$$ This is why the branching factor (i.e. the number of children a node may have) does not matter.
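As a quick numerical check of the base-change identity:
$$\log_4 256 = \frac{\log_2 256}{\log_2 4} = \frac{8}{2} = 4 .$$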
* Do n-ary DTs give better interpretability?
Yes, for the reasons mentioned earlier.
## Implementation
### Refactoring
Static and dynamic code analyses are performed during source code reviews of gosdt and mgosdt. Static code analysis is done during the development of our codebase; we then perform dynamic code analysis to study how the code behaves during execution.
### Static Code Analysis
### Dynamic Code Analysis
- **Thread and address sanitizers**. We debug our code with the sanitizers enabled via the `-fsanitize` flag. There were a large number of memory leaks, data races and uninitialized-memory issues in the original GOSDT codebase.
- Valgrind
- **CPU Profiler**. We ran the Linux profiler `perf` combined with flame graphs to get a better overview of possible hot spots that might cause a performance slowdown. Nothing was detected. See [cpu-profiling](https://gitlab.com/leannejdong/mgosdt/-/blob/main/cpu-profiling.md)
```
from sklearn import datasets
import xgboost as xgb
iris = datasets.load_iris()
X = iris.data
y = iris.target
from platform import python_version
print(python_version())
```
Let’s get all of our data set up. We’ll start off by creating a train-test split so we can see just how well XGBoost performs. We’ll go with an 80%-20% split this time.
```
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=0.2)
#In order for XGBoost to be able to use our data, we’ll need to transform it into a specific format that XGBoost can handle. That format is called DMatrix.
#It's a very simple one-liner to transform a numpy array of data to DMatrix format:
D_train = xgb.DMatrix(X_train, label=Y_train)
D_test = xgb.DMatrix(X_test, label=Y_test)
# Define xgboost
param = {
'eta': 0.3,
'max_depth': 3,
'objective': 'multi:softprob',
'num_class': 3}
steps = 20 # The number of training iterations
#Training and Testing
#We can finally train our model similar to how we do so with Scikit Learn:
model = xgb.train(param, D_train, steps)
#Let’s now run an evaluation.
# Again the process is very similar to that of training models in Scikit Learn:
import numpy as np
from sklearn.metrics import precision_score, recall_score, accuracy_score
preds = model.predict(D_test)
best_preds = np.asarray([np.argmax(line) for line in preds])
print("Precision = {}".format(precision_score(Y_test, best_preds, average='macro')))
print("Recall = {}".format(recall_score(Y_test, best_preds, average='macro')))
print("Accuracy = {}".format(accuracy_score(Y_test, best_preds)))
```
### Further Exploration with XGBoost
That just about sums up the basics of XGBoost. But there are some more cool features that’ll help you get the most out of your models.
The gamma parameter can also help with controlling overfitting. It specifies the minimum reduction in the loss required to make a further partition on a leaf node of the tree, i.e. if creating a new node doesn't reduce the loss by a certain amount, then we won't create it at all.
The booster parameter allows you to set the type of model you will use when building the ensemble. The default is gbtree which builds an ensemble of decision trees. If your data isn’t too complicated, you can go with the faster and simpler gblinear option which builds an ensemble of linear models.
Setting the optimal hyperparameters of any ML model can be a challenge. So why not let Scikit Learn do it for you? We can combine Scikit Learn’s grid search with an XGBoost classifier quite easily:
```
from sklearn.model_selection import GridSearchCV
clf = xgb.XGBClassifier()
parameters = {
"eta" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] ,
"max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
"min_child_weight" : [ 1, 3, 5, 7 ],
"gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
"colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ]
}
grid = GridSearchCV(clf,
parameters, n_jobs=4,
scoring="neg_log_loss", cv=3)
grid.fit(X_train, Y_train)
model.dump_model('dump.raw.txt')
```
## Embedding MGOSDT in Python
Our goal is to be able to run [example.py](https://gitlab.com/leannejdong/mgosdt/-/blob/main/python/example.py)
which calls the `GOSDT` class in [gosdt.py](https://gitlab.com/leannejdong/mgosdt/-/blob/main/python/model/gosdt.py).
This GOSDT class uses the C++ extension module `gosdt` defined in `python_extension.cpp`.
The implementation is done in C++ by creating a `GOSDT` object called `model`, which performs the operations `fit`, `predict` and `score`.
```python
import pandas as pd
import numpy as np
from model.gosdt import GOSDT
dataframe = pd.DataFrame(pd.read_csv("/home/leanne/Dev/mgosdt/experiments/datasets/iris/data.csv"))
X = dataframe[dataframe.columns[:-1]]
y = dataframe[dataframe.columns[-1:]]
hyperparameters = {
"regularization": 0.04,
"time_limit": 3600,
"verbose": True
}
model = GOSDT(hyperparameters)
model.fit(X, y)
# model.load("python/model/model.json")
# model.load("../gosdt_icml/model.json")
print("Execution Time: {}".format(model.time))
prediction = model.predict(X)
training_accuracy = model.score(X, y)
print("Training Accuracy: {}".format(training_accuracy))
print("Size: {}".format(model.leaves()))
print("Loss: {}".format(1 - training_accuracy))
print("Risk: {}".format(
model.leaves() * hyperparameters["regularization"]
+ 1 - training_accuracy))
model.tree.__initialize_training_loss__(X, y)
print(model.tree)
print(model.latex())
```
It is simple C! First we need a function that takes some Python objects as input. We parse them into native C++ variables; in this case, a `char` pointer that stores the contents of the Python string object. Then we do our operations in C++ (configuring the algorithm, fitting the model) and convert the native results back into Python objects. Next, we register the functions in the module's symbol table, a table that declares which functions are supported by the module. (Remember, all Python functions live in a module, even if they are actually C functions.) The name registered in the table (for example `"fit"`) is the Python name we use to call the function.
Finally, we have to declare the initialization of the module; this is the function that will be called when the module is imported.
### Python C API
```cpp
#include <Python.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include "gosdt.hpp"
// @param args: contains a single string object which is a JSON string containing the algorithm configuration
static PyObject * configure(PyObject * self, PyObject * args) {
const char * configuration;
if (!PyArg_ParseTuple(args, "s", & configuration)) { return NULL; }
std::istringstream config_stream(configuration);
GOSDT::configure(config_stream);
return Py_BuildValue("");
}
// @param args: contains a single string object which contains the training data in CSV form
// @returns a string object containing a JSON array of all resulting models
static PyObject * fit(PyObject * self, PyObject * args) {
const char * dataset;
if (!PyArg_ParseTuple(args, "s", & dataset)) { return NULL; }
std::istringstream data_stream(dataset);
GOSDT model;
std::string result;
model.fit(data_stream, result);
return Py_BuildValue("s", result.c_str());
}
// @returns the number of seconds spent training
static PyObject * time(PyObject * self, PyObject * args) { return Py_BuildValue("f", GOSDT::time); }
// @returns the number of iterations spent training
static PyObject * iterations(PyObject * self, PyObject * args) { return Py_BuildValue("i", GOSDT::iterations); }
// @returns the number of vertices in the dependency graph
static PyObject * size(PyObject * self, PyObject * args) { return Py_BuildValue("i", GOSDT::size); }
// @returns the current status code
static PyObject * status(PyObject * self, PyObject * args) { return Py_BuildValue("i", GOSDT::status); }
// Define the list of methods the Python interpreter needs to know about for a module
static PyMethodDef gosdt_methods[] = {
// { method name, method pointer, method parameter format, method description }
{"configure", configure, METH_VARARGS, "Configures the algorithm using an input JSON string"},
{"fit", fit, METH_VARARGS, "Trains the model using an input CSV string"},
{"time", time, METH_NOARGS, "Number of seconds spent training"},
{"iterations", iterations, METH_NOARGS, "Number of iterations spent training"},
{"size", size, METH_NOARGS, "Number of vertices in the dependency graph"},
{"status", status, METH_NOARGS, "Check the status code of the algorithm"},
{NULL, NULL, 0, NULL}
};
// Define the module
static struct PyModuleDef gosdt = {
PyModuleDef_HEAD_INIT,
"gosdt", // Module Name
"Generalized Optimal Sparse Decision Tree", // Module Description
-1, // Size of per-interpreter state
gosdt_methods // Module methods
};
// Initialize the module
PyMODINIT_FUNC PyInit_gosdt(void) {
return PyModule_Create(&gosdt);
}
```
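For reference, here is a minimal, assumed `setup.py` showing how such a CPython extension is typically compiled with setuptools. The exact source list, compiler flag and version below are placeholders; the real project build may need additional C++ sources or a different build system.
```python
# Minimal, illustrative build script for the extension module (assumed details).
from setuptools import setup, Extension

gosdt_extension = Extension(
    "gosdt",                           # must match the module name in PyModuleDef
    sources=["python_extension.cpp"],  # plus the rest of the C++ sources in practice
    extra_compile_args=["-std=c++17"], # assumed flag; adjust to the project's standard
)

setup(
    name="gosdt",
    version="0.1",                     # placeholder version
    ext_modules=[gosdt_extension],
)
```
Building with `python setup.py build_ext --inplace` then makes `import gosdt` work from that directory.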
## Importing C++ extension to the Python Library
```
%cd
%cd Dev/mgosdt
import gosdt
%cd ../python
import pandas as pd
import numpy as np
from model.gosdt import GOSDT
import gosdt
with open ("/home/leanne/Dev/mgosdt/experiments/datasets/tennis/tennis.csv", "r") as data_file:
    data = data_file.read()
with open ("/home/leanne/Dev/mgosdt/experiments/configurations/debug.json", "r") as config_file:
    config = config_file.read()
print("Config:", config)
print("Data:", data)
gosdt.configure(config)
result = gosdt.fit(data)
print("Result: ", result)
print("Time (seconds): ", gosdt.time())
print("Iterations: ", gosdt.iterations())
print("Graph Size: ", gosdt.size())
dataframe = pd.DataFrame(pd.read_csv("/home/leanne/Dev/mgosdt/experiments/datasets/iris/data.csv"))
X = dataframe[dataframe.columns[:-1]]
y = dataframe[dataframe.columns[-1:]]
hyperparameters = {
"regularization": 0.04,
"time_limit": 3600,
"verbose": True
}
model = GOSDT(hyperparameters)
model.fit(X, y)
print("Execution Time: {}".format(model.time))
#dataframe = pd.DataFrame(pd.read_csv("/home/leanne/Dev/mgosdt/experiments/datasets/monk_2/data.csv"))
dataframe = pd.DataFrame(pd.read_csv("/home/leanne/Dev/mgosdt/experiments/datasets/tennis/tennis.csv"))
X = dataframe[dataframe.columns[:-1]]
y = dataframe[dataframe.columns[-1:]]
hyperparameters = {
"regularization": 0.1,
"time_limit": 3600,
"verbose": True,
}
model = GOSDT(hyperparameters)
model.fit(X, y)
print("Execution Time: {}".format(model.time))
```
```
import pandas as pd
import numpy as np
import datetime
from datetime import datetime
import matplotlib.pyplot as plt
from pmdarima.arima import auto_arima
data = pd.read_csv('report.csv')
data.head()
data.shape
len(data['report_year'].unique())
data.reset_index(drop=True)
data.isnull().sum()
data.isnull().sum()
data=data[['report_year','agency_jurisdiction','assaults_percapita']]
data.head()
df=data.loc[data['agency_jurisdiction'] == 'Washington, DC']
df.head()
df.isnull().sum()
df.reset_index(drop=True)
hc=df[['report_year','assaults_percapita']]
hc.set_index('report_year',inplace=True)
hc.shape
hc.head()
plt.figure(figsize=(10,7))
plt.plot(hc,'r-')
from pmdarima.arima import ADFTest
adf_test = ADFTest(alpha = 0.05)
adf_test.should_diff(hc)
from statsmodels.tsa.stattools import adfuller
result = adfuller(hc, autolag='AIC')
print(f'ADF Statistic: {result[0]}')
print(f'n_lags: {result[2]}')
print(f'p-value: {result[1]}')
for key, value in result[4].items():
    print('Critical Values:')
    print(f'   {key}, {value}')
train=hc[:35]
test=hc[34:]
train.tail()
test.head()
plt.plot(train)
plt.plot(test)
from statsmodels.tsa.holtwinters import ExponentialSmoothing
model = ExponentialSmoothing(train, trend="add", seasonal="add", seasonal_periods=11)
#model2 = ExponentialSmoothing(train, trend="add", seasonal="add", seasonal_periods=12, damped=True)
fit = model.fit()
pred = fit.forecast(7)
#fit2 = model2.fit()
#pred2 = fit2.forecast(7)
sse1 = np.sqrt(np.mean(np.square(test.values.ravel() - pred.values)))  # RMSE of the forecast
#sse2 = np.sqrt(np.mean(np.square(test.values.ravel() - pred2.values)))
fig, ax = plt.subplots(figsize=(12, 6))
ax.plot(train.index[25:35], train.values[25:35]);  # last ten training points
ax.plot(test.index, test.values, label='truth');
ax.plot(test.index, pred, linestyle='--', color='#ff7823', label="Forecasted (RMSE={:0.2f}, AIC={:0.2f})".format(sse1, fit.aic));
#ax.plot(test.index, pred2, linestyle='--', color='#3c763d',label="damped (RMSE={:0.2f}, AIC={:0.2f})".format(sse2, fit2.aic));
ax.legend();
ax.set_title("Exponential Smoothing");
from sklearn.metrics import r2_score
r2_score(test, pred)
arima_model = auto_arima(train,start_p=2, d=1, start_q=2,
max_p=10, max_d=5, max_q=7, start_P=1,
D=0, start_Q=0, max_P=5, max_D=5,
max_Q=0, m=15, seasonal=True,
error_action='warn',trace = True,
suppress_warnings=True, stepwise=True, random_state=40, n_fits=50)
arima_model.summary()
prediction = pd.DataFrame(arima_model.predict(n_periods = 7),index=test.index)
prediction.columns = ['predicted_assaults_percapita']
prediction
print("Predicted Assaults_percapita Crime Rates of Washington DC ")
plt.figure(figsize=(8,5))
plt.plot(train,label="Training")
plt.plot(test,label="Test")
plt.plot(prediction,label="Predicted")
plt.legend()
plt.show()
from sklearn.metrics import r2_score
r2_score(test['assaults_percapita'], prediction['predicted_assaults_percapita'])
```
#### 1. Consider the problem of predicting how well a student does in her second year of college/university, given how well she did in her first year.
Specifically, let x be equal to the number of "A" grades (including A-, A and A+ grades) that a student receives in their first year of college (freshman year). We would like to predict the value of y, which we define as the number of "A" grades they get in their second year (sophomore year).
Here each row is one training example. Recall that in linear regression, our hypothesis is ```h_θ(x)=θ_0+ θ_1x```, and we use m to denote the number of training examples.
<img src="https://d396qusza40orc.cloudfront.net/flex-ml/quizIIq1v3.png" alt="">
For the training set given above (note that this training set may also be referenced in other questions in this quiz), what is the value of m? In the box below, please enter your answer (which should be a number between 0 and 10).
##### Ans: 4
#### 2. Many substances that can burn (such as gasoline and alcohol) have a chemical structure based on carbon atoms; for this reason they are called hydrocarbons. A chemist wants to understand how the number of carbon atoms in a molecule affects how much energy is released when that molecule combusts (meaning that it is burned). The chemist obtains the dataset below. In the column on the right, “kJ/mol” is the unit measuring the amount of energy released.
<img src="https://d396qusza40orc.cloudfront.net/ml/images/2.2-quiz1.png" alt="">
#### You would like to use linear regression ```(h_θ(x) = θ_0+θ_1x)``` to estimate the amount of energy released (y) as a function of the number of carbon atoms (x). Which of the following do you think will be the values you obtain for ```θ_0``` and ```θ_1```? You should be able to select the right answer without actually implementing linear regression.
##### Ans: ```θ_0 = −569.6, θ_1 = −530.9```
#### 3. Suppose we set ```θ_0 = −1, θ1 = 2``` in the linear regression hypothesis from Q1. What is ```hθ(6)```?
##### Ans: 11
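Working: ```h_θ(6) = θ_0 + θ_1(6) = −1 + 2(6) = 11```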
#### 4. Let f be some function so that ```f(θ_0,θ_1)``` outputs a number. For this problem, f is some arbitrary/unknown smooth function (not necessarily the cost function of linear regression, so f may have local optima). Suppose we use gradient descent to try to minimize ```f(θ_0,θ_1)``` as a function of ```θ_0``` and ```θ_1```. Which of the following statements are true? (Check all that apply.)
##### Ans:
- If ```θ_0``` and ```θ_1``` are initialized at a local minimum, then one iteration will not change their values.
- If the learning rate is too small, then gradient descent may take a very long time to converge.
#### 5. Suppose that for some linear regression problem (say, predicting housing prices as in the lecture), we have some training set, and for our training set we managed to find some ```θ_0, θ_1``` such that ```J(θ_0,θ_1) = 0```.
#### Which of the statements below must then be true? (Check all that apply.)
##### Ans:
- For these values of ```θ_0 and θ_1``` that satisfy ```J(θ_0,θ_1) = 0```, we have that ```h_θ(x(i))= y(i)``` for every training example ```(x(i),y(i))```
# k-Fold Cross Validation
## Importing the libraries
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
## Importing the dataset
```
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
```
## Splitting the dataset into the Training set and Test set
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
```
## Feature Scaling
```
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
```
## Training the Kernel SVM model on the Training set
```
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
```
## Making the Confusion Matrix
```
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
```
## Applying k-Fold Cross Validation
```
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)
print("Accuracy: {:.2f} %".format(accuracies.mean()*100))
print("Standard Deviation: {:.2f} %".format(accuracies.std()*100))
```
## Visualising the Training set results
```
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
```
## Visualising the Test set results
```
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
```
## Using submodel loss of active materials in PyBaMM
In this notebook we show how to use the loss of active materials (LAM) submodel in PyBaMM. The LAM model follows equation (25) from [[6]](#References), and the stresses are calculated by equations (7)-(9) in [[1]](#References). To see all of the models and submodels available in PyBaMM, please take a look at the PyBaMM documentation.
```
%pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
import os
import numpy as np
import matplotlib.pyplot as plt
os.chdir(pybamm.__path__[0]+'/..')
# Here the model is applicable to SPM, SPMe and DFN
model = pybamm.lithium_ion.DFN(
options=
{
"particle": "Fickian diffusion",
"SEI":"solvent-diffusion limited",
"SEI film resistance":"distributed",
"SEI porosity change":"false",
"particle mechanics":"swelling only",
"loss of active material":"stress-driven",
}
)
chemistry = pybamm.parameter_sets.Ai2020
param = pybamm.ParameterValues(chemistry=chemistry)
param.update({"Negative electrode LAM constant propotional term": 1e-4})
param.update({"Positive electrode LAM constant propotional term": 1e-4})
total_cycles = 2
experiment = pybamm.Experiment(
[
"Discharge at 1C until 3 V",
"Rest for 600 seconds",
"Charge at 1C until 4.2 V",
"Hold at 4.199 V for 600 seconds",
] * total_cycles
)
sim1 = pybamm.Simulation(
model,
experiment = experiment,
parameter_values = param,
solver = pybamm.CasadiSolver("fast with events")
)
solution = sim1.solve(calc_esoh=False)
t_all = solution["Time [h]"].entries
v_all = solution["Terminal voltage [V]"].entries
I_if_n = solution["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p = solution["Sum of x-averaged positive electrode interfacial current densities"].entries
# plotting the results
f, (ax1, ax2, ax3) = plt.subplots(1, 3 ,figsize=(18,4))
ax1.plot(t_all, v_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("Terminal voltage [V]")
#ax1.legend()
ax2.plot(t_all, I_if_p, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("Positive electrode interfacial current densities")
#ax2.legend()
#ax2.set_xlim(6000,7000)
ax3.plot(t_all, I_if_n, label="loss of active material model")
ax3.set_xlabel("Time [h]")
ax3.set_ylabel("Negative electrode interfacial current densities")
ax3.legend(bbox_to_anchor=(1, 1.2))
#ax3.set_xlim(10000,15000)
# f.tight_layout(pad=1.0)
plt.show()
LAM_n_all = solution["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all = solution["X-averaged positive electrode active material volume fraction"].entries
f, (ax1, ax2) = plt.subplots(1, 2 ,figsize=(10,4))
ax1.plot(t_all, LAM_n_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode active material volume fraction")
ax2.plot(t_all, LAM_p_all, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode active material volume fraction")
f.tight_layout(pad=3.0)
plt.show()
S_t_n_all = solution["X-averaged negative particle surface tangential stress"].entries
S_t_p_all = solution["X-averaged positive particle surface tangential stress"].entries
f, (ax1, ax2) = plt.subplots(1, 2 ,figsize=(10,4))
ax1.plot(t_all, S_t_n_all, label="loss of active material model")
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative tangential stress/ $E_n$")
ax2.plot(t_all, S_t_p_all, label="loss of active material model")
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive tangential stress/ $E_p$")
f.tight_layout(pad=3.0)
plt.show()
k1 = 1e-4
k2 = 1e-3
k3 = 1e-2
param.update({"Positive electrode LAM constant propotional term": k2})
param.update({"Negative electrode LAM constant propotional term": k2})
sim2 = pybamm.Simulation(
model,
experiment=experiment,
parameter_values=param,
solver=pybamm.CasadiSolver("fast with events"),
)
solution2 = sim2.solve(calc_esoh=False)
param.update({"Positive electrode LAM constant propotional term": k3})
param.update({"Negative electrode LAM constant propotional term": k3})
sim3 = pybamm.Simulation(
model,
experiment=experiment,
parameter_values=param,
solver=pybamm.CasadiSolver("fast with events"),
)
solution3 = sim3.solve(calc_esoh=False)
t_all2 = solution2["Time [h]"].entries
t_all3 = solution3["Time [h]"].entries
LAM_n_all2 = solution2["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all2 = solution2["X-averaged positive electrode active material volume fraction"].entries
LAM_n_all3 = solution3["X-averaged negative electrode active material volume fraction"].entries
LAM_p_all3 = solution3["X-averaged positive electrode active material volume fraction"].entries
f, (ax1, ax2) = plt.subplots(1, 2 ,figsize=(10,4))
ax1.plot(t_all, LAM_n_all, label="k_LAM = "+ str(k1))
ax1.plot(t_all2, LAM_n_all2, label="k_LAM = "+ str(k2))
ax1.plot(t_all3, LAM_n_all3, label="k_LAM = "+ str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode active material volume fraction")
ax1.legend()
ax2.plot(t_all, LAM_p_all, label="k_LAM = "+ str(k1))
ax2.plot(t_all2, LAM_p_all2, label="k_LAM = "+ str(k2))
ax2.plot(t_all3, LAM_p_all3, label="k_LAM = "+ str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode active material volume fraction")
f.tight_layout(pad=3.0)
ax2.legend()
plt.show()
t_all2 = solution2["Time [h]"].entries
t_all3 = solution3["Time [h]"].entries
a_n_all = solution["X-averaged negative electrode surface area to volume ratio"].entries
a_p_all = solution["X-averaged positive electrode surface area to volume ratio"].entries
a_n_all2 = solution2["X-averaged negative electrode surface area to volume ratio"].entries
a_p_all2 = solution2["X-averaged positive electrode surface area to volume ratio"].entries
a_n_all3 = solution3["Negative electrode surface area to volume ratio"].entries[-1,:]
a_p_all3 = solution3["Positive electrode surface area to volume ratio"].entries[0,:]
f, (ax1, ax2) = plt.subplots(1, 2 ,figsize=(10,4))
ax1.plot(t_all, a_n_all, label="k_LAM = "+ str(k1))
ax1.plot(t_all2, a_n_all2, label="k_LAM = "+ str(k2))
ax1.plot(t_all3, a_n_all3, label="k_LAM = "+ str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("X-averaged negative electrode surface area to volume ratio")
ax1.legend()
ax2.plot(t_all, a_p_all, label="k_LAM = "+ str(k1))
ax2.plot(t_all2, a_p_all2, label="k_LAM = "+ str(k2))
ax2.plot(t_all3, a_p_all3, label="k_LAM = "+ str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("X-averaged positive electrode surface area to volume ratio")
f.tight_layout(pad=3.0)
ax2.legend()
plt.show()
v_all = solution["Terminal voltage [V]"].entries
v_all2 = solution2["Terminal voltage [V]"].entries
v_all3 = solution3["Terminal voltage [V]"].entries
I_if_n = solution["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p = solution["Sum of x-averaged positive electrode interfacial current densities"].entries
I_if_n2 = solution2["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p2 = solution2["Sum of x-averaged positive electrode interfacial current densities"].entries
I_if_n3 = solution3["Sum of x-averaged negative electrode interfacial current densities"].entries
I_if_p3 = solution3["Sum of x-averaged positive electrode interfacial current densities"].entries
f, (ax1, ax2, ax3) = plt.subplots(1, 3 ,figsize=(18,5))
ax1.plot(t_all, v_all, label="k_LAM = "+ str(k1))
ax1.plot(t_all2, v_all2, label="k_LAM = "+ str(k2))
ax1.plot(t_all3, v_all3, label="k_LAM = "+ str(k3))
ax1.set_xlabel("Time [h]")
ax1.set_ylabel("Terminal voltage [V]")
#ax1.legend()
#ax1.set_xlim(0.5,0.8)
ax2.plot(t_all, I_if_n, label="k_LAM = "+ str(k1))
ax2.plot(t_all2, I_if_n2, label="k_LAM = "+ str(k2))
ax2.plot(t_all3, I_if_n3, label="k_LAM = "+ str(k3))
ax2.set_xlabel("Time [h]")
ax2.set_ylabel("Negative electrode interfacial current densities")
#ax2.legend()
#ax2.set_xlim(6000,7000)
ax2.set_ylim(2.2155,2.2165)
ax3.plot(t_all, I_if_p, label="k_LAM = "+ str(k1))
ax3.plot(t_all2, I_if_p2, label="k_LAM = "+ str(k2))
ax3.plot(t_all3, I_if_p3, label="k_LAM = "+ str(k3))
ax3.set_xlabel("Time [h]")
ax3.set_ylabel("Positive electrode interfacial current densities")
ax3.legend(bbox_to_anchor=(0.68, 1.3), ncol=2)
#ax3.set_xlim(2,2.8)
#ax3.set_ylim(2.492,2.494)
ax3.set_ylim(-2.494,-2.492)
plt.tight_layout(pad=1.0)
```
## References
The relevant papers for this notebook are:
```
pybamm.print_citations()
```
# Homework4
A few words before the main parts begin:
- `CMakeLists.txt` enables the Google tests and the compiler sanitizers. I use this CMake setup **only during development**, not for evaluation.
- As mentioned, the unit tests (Google Test) can be found in `Tester.cpp`, `TestAggregation.cpp`, and `TestDictionaryCompression.cpp`. Use CMake to build and run them.
- I did some basic C++ refactoring, but it is not complete. The code style is still not ideal, e.g. functions are defined in the header files.
- I only use the given makefile for evaluation. For the aforementioned reasons, I made a tiny change to the makefile to fit my code, but the change is not performance-critical at all.
- I did not cover all the (annoying) edge cases of SIMD programming, such as loading from aligned memory addresses and handling the remaining tail with scalar code. I hard-coded some of this because comments in the skeleton hinted that we may make assumptions about the datasets. I have taken care of some edge cases, but I still believe my implementation cannot handle datasets of arbitrary size.
# Aggregation
We simulate the following SQL query in C++. We only have to produce the count, so there is no materialization and no data copying.
```sql
SELECT COUNT(*)
FROM R
WHERE R.a > 42
```
The dataset comes in two variants, `int8_t` and `int64_t`. For each variant we implement three types of counting: trivial counting, branch-free counting, and SIMD counting (a scalar sketch of the first two follows the list):
- `int8_t`
- count8
- count8BrFree
- count8SIMD
- `int64_t`
- count64
- count64BrFree
- count64SIMD
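As a reference for the discussion below, here is a minimal sketch of the two scalar variants (my own wording, not the course skeleton; the predicate follows the SQL query above):
```c++
// Minimal sketch of the scalar counting variants (not the course skeleton).
#include <cstddef>
#include <cstdint>

// count8: the branch in the loop body can be mispredicted,
// which hurts most around 50 % selectivity.
uint64_t count8(const int8_t* in, size_t n, int8_t threshold = 42) {
    uint64_t count = 0;
    for (size_t i = 0; i < n; ++i)
        if (in[i] > threshold)
            ++count;
    return count;
}

// count8BrFree: the comparison result (0 or 1) is added unconditionally,
// so there is no data-dependent branch at all.
uint64_t count8BrFree(const int8_t* in, size_t n, int8_t threshold = 42) {
    uint64_t count = 0;
    for (size_t i = 0; i < n; ++i)
        count += (in[i] > threshold);
    return count;
}
```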
## 1. Compiler Auto-Vectorization on different optimization flags
GCC 9.2.0 for `count8`:
- `-O0`: trivial code with a branch.
- `-O1` and `-O2`:
- https://compiler.db.in.tum.de/z/tapZ2e and https://compiler.db.in.tum.de/z/S65gHh
- We can see from the generated assembly that the code is optimized into a branch-free version. In addition, the two flags produce very similar code.
- Zoom in:
```asm
cmp BYTE PTR [rcx], dl
setl sil
movzx esi, sil
add eax, esi
```
- Interpretation of the `Zoom in`: `setl` materializes the comparison result as 0 or 1, and the addition does not depend on the outcome of `cmp` at all; it **just adds anyway**, so there is no branch.
- `-O3`:
- https://compiler.db.in.tum.de/z/wgntZh
- The generated assembly is much more sophisticated and not really readable, but it is easy to spot `xmm` registers, which means the code is vectorized. GCC assumes that every target CPU has `xmm` registers and the corresponding SSE instructions.
- `-O3 -fno-tree-vectorize`
- https://compiler.db.in.tum.de/z/Y0U5Au
- `-fno-tree-vectorize` disables the compiler auto-vectorization, so the generated assembly is readable again. Unsurprisingly, the code is branch-free. An additional improvement is instruction reordering: the loop counter increment `add rdi, 1` is moved into the predicate evaluation part. As learned in the lecture, this reduces **data hazards**, so a higher IPC is possible.
- `-O3 -march=skylake-avx512`
- https://compiler.db.in.tum.de/z/TipY86
- Giving the compiler more information about the available CPU instructions lets it generate code with `ymm` registers, a step further into newer SIMD instruction sets, but still without `zmm`.
I also checked Clang, but its results are not substantially different from GCC's, so I do not duplicate that part here. The same holds for `count64`.
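Before looking at the measurements, here is a minimal sketch of what a hand-written SIMD count can look like (assumptions: AVX2 is available, signed 8-bit data, predicate `value > threshold`; my submitted `count8SIMD` differs in details such as alignment handling and chunking):
```c++
// Minimal AVX2 sketch of branch-free byte counting (not the submitted code).
#include <immintrin.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>

uint64_t count8SIMDSketch(const int8_t* in, size_t n, int8_t threshold = 42) {
    const __m256i thr = _mm256_set1_epi8(threshold);
    __m256i acc64 = _mm256_setzero_si256();            // four 64-bit partial sums
    size_t i = 0;
    while (i + 32 <= n) {
        // inner loop runs at most 255 iterations so the per-byte counters cannot overflow
        size_t vec_end = i + ((n - i) / 32) * 32;      // end of the last full 32-byte block
        size_t block_end = std::min(vec_end, i + 255 * 32);
        __m256i inner = _mm256_setzero_si256();
        for (; i < block_end; i += 32) {
            __m256i v  = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(in + i));
            __m256i gt = _mm256_cmpgt_epi8(v, thr);    // 0xFF (= -1) where in[i] > threshold
            inner = _mm256_sub_epi8(inner, gt);        // subtracting -1 adds 1 per match
        }
        // horizontally add the 32 byte counters into four 64-bit lanes
        acc64 = _mm256_add_epi64(acc64, _mm256_sad_epu8(inner, _mm256_setzero_si256()));
    }
    alignas(32) uint64_t lanes[4];
    _mm256_store_si256(reinterpret_cast<__m256i*>(lanes), acc64);
    uint64_t count = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    for (; i < n; ++i)                                 // scalar tail
        count += (in[i] > threshold);
    return count;
}
```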
<br>
<br>
---
## 2. Performance Comparison
- compile with `-O3`
- `static constexpr unsigned chunkSize = 32 * 1024;`
### `count8` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.01 | 37967780 | 75753929 | 298264 | 1186.00 | 4470.00 | 8460367.00| 2.00 | 4.49 |
| 10| 0.01 | 37944228 | 75759726 | 298640 | 44.00 | 4373.00 | 9296856.00| 2.00 | 4.08 |
| 50| 0.01 | 37929608 | 75753652 | 297388 | 2.00 | 4284.00 | 9293465.00| 2.00 | 4.08 |
| 90| 0.01 | 37929970 | 75753431 | 297536 | 2.00 | 4271.00 | 9292979.00| 2.00 | 4.08 |
| 99| 0.01 | 37929996 | 75757288 | 297449 | 13.00 | 4302.00 | 9293348.00| 2.00 | 4.08 |
### `count8BrFree` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1 | 0.01 | 37937768.00 | 75754360.00 | 248891.00 | 141.00 | 4371.00 | 9286812.00 | 2.00 | 4.09 |
| 10 | 0.01 | 37936964.00 | 75756120.00 | 250062.00 | 31.00 | 4329.00 | 8551862.00 | 2.00 | 4.44 |
| 50 | 0.01 | 37947498.00 | 75757929.00 | 248265.00 | 29.00 | 4302.00 | 8683809.00 | 2.00 | 4.37 |
| 90 | 0.01 | 37933641.00 | 75754000.00 | 248694.00 | 7.00 | 4272.00 | 8570374.00 | 2.00 | 4.43 |
| 99 | 0.01 | 37936553.00 | 75751117.00 | 248884.00 | 2.00 | 4252.00 | 8570310.00 | 2.00 | 4.43 |
### `count8SIMD` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1 | 0.00 | 5646344.00 | 19043315.00 | 256755.00 | 38.00 | 4212.00 | 1383754.00 | 3.37 | 4.08 |
| 10 | 0.00 | 5676919.00 | 19049035.00 | 256416.00 | 23.00 | 4242.00 | 1426374.00 | 3.36 | 3.98 |
| 50 | 0.00 | 5665713.00 | 19048964.00 | 258650.00 | 9.00 | 4226.00 | 1388184.00 | 3.36 | 4.08 |
| 90 | 0.00 | 5654067.00 | 19043315.00 | 256613.00 | 1.00 | 4199.00 | 1385808.00 | 3.37 | 4.08 |
| 99 | 0.00 | 5643771.00 | 19043315.00 | 256614.00 | 1.00 | 4196.00 | 1418537.00 | 3.37 | 3.98 |
<br>
The only metric that differs across selectivities within each function's table is the number of **LLC-misses**. I cannot fully explain why: selectivity is the only experiment variable, everything else is controlled, and the code compiled with `-O3` is already branch-free. To see whether the picture changes when the program runs longer, I also built a 1 GiB dataset.
### `count8` with varying selectivity with 1GiB
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.07| 303026454.00| 605887954.00| 2561410.00| 1081.00| 33475.00| 67607083.00| 2.00| 4.48 |
| 10| 0.07| 303079169.00| 605903523.00| 2510285.00| 64.00| 33459.00| 69049179.00| 2.00| 4.39 |
| 50| 0.07| 303024145.00| 605904251.00| 2492105.00| 24.00| 33426.00| 69104967.00| 2.00| 4.38 |
| 90| 0.07| 303041225.00| 605903970.00| 2517845.00| 45.00| 33400.00| 70093723.00| 2.00| 4.32 |
| 99| 0.07| 302975832.00| 605898334.00| 2465491.00| 12.00| 33342.00| 67570932.00| 2.00| 4.48 |
### `count8BrFree` with varying selectivity with 1GiB
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.07| 303069403.00| 605904571.00| 2370574.00| 162.00| 33569.00| 70088179.00| 2.00| 4.32 |
| 10| 0.07| 302953587.00| 605897569.00| 2370558.00| 43.00| 33391.00| 67589821.00| 2.00| 4.48 |
| 50| 0.07| 303010251.00| 605898885.00| 2370657.00| 20.00| 33374.00| 68960633.00| 2.00| 4.39 |
| 90| 0.07| 302940150.00| 605895134.00| 2370447.00| 11.00| 33297.00| 67517510.00| 2.00| 4.49 |
| 99| 0.07| 302998359.00| 605895551.00| 2370466.00| 45.00| 33357.00| 70757769.00| 2.00| 4.28 |
### `count8SIMD` with varying selectivity with 1GiB
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.01| 44026985.00| 152235981.00| 2377856.00| 30.00| 32948.00| 9969621.00| 3.46| 4.42 |
| 10| 0.01| 43959079.00| 152243450.00| 2435882.00| 19.00| 32972.00| 10797501.00| 3.46| 4.07 |
| 50| 0.01| 43981193.00| 152243436.00| 2446258.00| 5.00| 32966.00| 10649242.00| 3.46| 4.13 |
| 90| 0.01| 44287390.00| 152243371.00| 2377643.00| 3.00| 32956.00| 10860073.00| 3.44| 4.08 |
| 99| 0.01| 44206670.00| 152236117.00| 2390787.00| 6.00| 32933.00| 10125350.00| 3.44| 4.37 |
We observe the same pattern within each function's cases. The branch-misses are essentially constant, as is the IPC, which results in the same runtime across selectivities for each function. I still cannot explain why the LLC behaviour, i.e. the memory access pattern, differs when the selectivity changes. Prefetching comes to mind, but it does not match my mental model. **I could not resolve this and list it in the TODO & Questions part.**
<br>
<br>
### `count64` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.02| 74598307.00| 151106622.00| 16787016.00| 19264.00| 2075.00| 18561840.00| 2.03| 4.02 |
| 10| 0.02| 74337775.00| 151093901.00| 16786607.00| 20.00| 1995.00| 18481213.00| 2.03| 4.02 |
| 50| 0.02| 77126731.00| 151102084.00| 16786862.00| 10.00| 2004.00| 17790253.00| 1.96| 4.34 |
| 90| 0.02| 76202818.00| 151093920.00| 16786599.00| 5.00| 1931.00| 18193870.00| 1.98| 4.19 |
| 99| 0.02| 75836536.00| 151099305.00| 16786830.00| 1.00| 1929.00| 18017938.00| 1.99| 4.21 |
### `count64BrFree` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.02| 75551472.00| 151102283.00| 16787897.00| 1071.00| 1608.00| 17928791.00| 2.00| 4.21 |
| 10| 0.02| 75320401.00| 151091230.00| 16787562.00| 8.00| 1558.00| 17948005.00| 2.01| 4.20 |
| 50| 0.02| 74095232.00| 151099346.00| 16787844.00| 9.00| 1575.00| 18338945.00| 2.04| 4.04 |
| 90| 0.02| 75269268.00| 151093596.00| 16787639.00| 21.00| 1566.00| 18113362.00| 2.01| 4.16 |
| 99| 0.02| 74556385.00| 151093581.00| 16787597.00| 2.00| 1566.00| 18416177.00| 2.03| 4.05 |
### `count64SIMD` with varying selectivity
|Selectivity | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| 1| 0.02| 72763197.00| 167859379.00| 16785832.00| 45.00| 2278.00| 17557114.00| 2.31| 4.14 |
| 10| 0.02| 71748000.00| 167855643.00| 16785664.00| 23.00| 2311.00| 17694600.00| 2.34| 4.05 |
| 50| 0.02| 72700947.00| 167850905.00| 16785493.00| 8.00| 2257.00| 17288096.00| 2.31| 4.21 |
| 90| 0.02| 72470870.00| 167859045.00| 16785841.00| 4.00| 2319.00| 17197390.00| 2.32| 4.21 |
| 99| 0.02| 72723105.00| 167853672.00| 16785593.00| 4.00| 2283.00| 17385915.00| 2.31| 4.18 |
The `int64_t` case has an 8x larger dataset, so more stress is put on the memory bus and generally more LLC-misses occur, but otherwise the pattern is the same as in the `int8_t` case.
---
# Dictionary Decompression
## Introduction
I implemented a precomputed lookup table (PTB) that decodes a whole **byte** instead of a nibble, which is a standard SIMD programming trick. As the following code shows, the size of the PTB is `sizeof(int64_t) * 256 = 2048` bytes, which easily fits into the L1 cache. A better PTB layout than this trivial one is surely possible, but I did not go deeper into that.
```c++
constexpr std::array<int32_t, 16> nibble_dict = {100, 101, 102, 103,
200, 201, 202, 203,
300, 301, 302, 303,
400, 401, 402, 403};
constexpr std::array<int64_t, 256> fill_byte_dict() {
std::array<int64_t, 256> byte_dict{0};
for (size_t i = 0; i < 256; i++) {
// Non-Trivial: Assumption: Little-Endian
byte_dict[i] = (static_cast<int64_t>(nibble_dict[i & 0b1111]) << 32) + nibble_dict[(i >> 4) & 0b1111];
}
return byte_dict;
}
constexpr std::array<int64_t, 256> byte_dict = fill_byte_dict();
```
With such a PTB it is not necessary to pick out and decode each nibble individually; I can simply optimize the given scalar function with it. The work inside the function is greatly reduced, and I can expect a much higher IPC from the PTB-assisted code:
```c++
void dictDecompress_BYTE(uint8_t *in, uint32_t inCount, int32_t *out) {
int64_t *out_64 = reinterpret_cast<int64_t*>(out);
for (uint32_t i = 0; i < inCount; i++) {
out_64[i] = byte_dict[in[i]]; // This line is just a single copy XD
}
}
```
Not surprisingly, this PTB is also helpful for optimizing the other to-be-implemented functions: handling a whole byte as one unit is a good way to avoid unnecessary bit operations. The last function, `dictDecompressPermute`, however cannot use the PTB at all, because it is required to keep the dictionary in a SIMD register. Under this requirement only the given `nibble_dict` fits into a SIMD register; the PTB can live in the L1 cache, but not in a single tiny register. So the last function works with plain nibble-level bit operations.
<br>
The functions to be evaluated (a sketch of the gather-based idea follows the list):
- dictDecompress
- dictDecompress_BYTE
- dictDecompress8
- dictDecompressGather
- dictDecompressPermute
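For illustration, here is a minimal sketch of the gather-based idea (assumptions: AVX2, the 16-entry `nibble_dict` from above, and the high nibble of each byte decoding to the earlier output slot, as implied by the `byte_dict` construction; the submitted `dictDecompressGather` differs in details):
```c++
// Minimal sketch of gather-based nibble decompression (not the submitted code).
#include <immintrin.h>
#include <cstdint>

void dictDecompressGatherSketch(const uint8_t* in, uint32_t inCount, int32_t* out,
                                const int32_t* dict /* 16-entry nibble dictionary */) {
    uint32_t i = 0;
    for (; i + 4 <= inCount; i += 4) {
        // unpack the 8 nibbles of 4 input bytes into 8 32-bit indices
        alignas(32) int32_t idx[8];
        for (int b = 0; b < 4; ++b) {
            uint8_t v = in[i + b];
            idx[2 * b]     = (v >> 4) & 0xF;   // high nibble decodes first (byte_dict layout)
            idx[2 * b + 1] = v & 0xF;
        }
        __m256i indices = _mm256_load_si256(reinterpret_cast<const __m256i*>(idx));
        __m256i values  = _mm256_i32gather_epi32(dict, indices, 4);  // scale = 4 bytes
        _mm256_storeu_si256(reinterpret_cast<__m256i*>(out + 2 * i), values);
    }
    // scalar tail for the remaining input bytes
    for (; i < inCount; ++i) {
        out[2 * i]     = dict[(in[i] >> 4) & 0xF];
        out[2 * i + 1] = dict[in[i] & 0xF];
    }
}
```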
## Evaluation
- `static constexpr unsigned chunkSize = 6 * 1024;`
|Function | Runtime | Cycles | Instructions | L1-misses | LLC-misses | Branch-misses | Task-clock | IPC | GHz |
|---|---|---|---|---|---|---|---|---|---|
| dictDecompress| 0.95| 4227264116.00| 12610021897.00| 10992.00| 1235.00| 1234282.00| 945615877.00| 2.98|4.47 |
| dictDecompress_BYTE| 0.30| 1335041763.00| 5906043128.00| 4448.00| 250.00| 1230785.00| 300948618.00|4.42| 4.44 |
| dictDecompress8| 0.31| 1369582142.00| 3814647224.00| 6016.00| 159.00| 1231228.00| 308940631.00|2.79| 4.43 |
| dictDecompressGather| 0.38| 1695602737.00| 3614431670.00| 5015.00| 41.00| 1231441.00| 379709796.00|2.13| 4.47 |
| dictDecompressPermute| 0.75| 3358729931.00| 7382434310.00| 8557.00| 156.00| 1233481.00| 750297263.00| 2.20|4.48 |
The SIMD versions improve the runtime a lot. As said before (see the code), `dictDecompressPermute` has to unpack the values into nibbles; in addition, the `*_permute_*` instructions need multiple loads to do their work and have a longer latency.
As expected, `dictDecompress_BYTE` dominates with the best performance, the highest IPC, and the best L1 cache hit ratio. The assembly is listed here: https://compiler.db.in.tum.de/z/nmSJ_Q. As said, we do nothing more than data copying, which is reflected in the loop body being essentially **only** `mov` instructions. I do not go deeper into the SIMD versions this time; they are good, but not optimal for this over-simplified mock-up problem. In a more realistic problem we would have to combine the PTB and SIMD to avoid unnecessary runtime computation.
# TODO & Questions:
1. Is code using `zmm` really better than code using `ymm`? Or is `ymm` preferred to keep the CPU from clocking down (getting too hot)?
- The former uses `ymm` and the latter uses `zmm`, which surprised me.
- SIMD code with `-O3 -march=skylake-avx512`: https://compiler.db.in.tum.de/z/dKxLUt
- SIMD code with `-O0 -march=skylake-avx512`: https://compiler.db.in.tum.de/z/UuA7Gb
2. Why is no `zmm` code generated for the scalar loop, even though the CPU is AVX-512 capable?
- Scalar code with `-O3 -march=skylake-avx512`: https://compiler.db.in.tum.de/z/TipY86
3. To be honest, I do not understand the `chunkSize` variable. Why do we need it, and does it have an impact on performance?
4. The **LLC-misses** issue in the aggregation part. Either the answer is too obvious for me to see, or it is something I simply do not know; two extremes.
# Appendix
## Experiment Environment
- All the experiments are performed on a Core i9-7900X - Intel: https://en.wikichip.org/wiki/intel/core_i9/i9-7900x
## Aggregation with `-O1` and `-O2`:
These performance results are less valuable to study, so I do not analyze them and only list them as **raw data and backup** (if necessary, I can quickly process them with `grep`/`sed` into markdown tables like the ones above):
- `-O1`:
```
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 1, 0.06, 287754790.00, 939635722.00, 2101705.00, 29105.00, 963.00, 64318743.00, 1, 3.27, 1.00, 4.47
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 1, 0.07, 287788349.00, 939637852.00, 2101547.00, 130.00, 904.00, 65339314.00, 1, 3.27, 1.00, 4.40
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 1, 0.00, 7275923.00, 18896781.00, 2098081.00, 31.00, 260.00, 1746311.00, 1, 2.60, 1.00, 4.17
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 10, 0.07, 287776338.00, 939647803.00, 2101982.00, 128.00, 1079.00, 66631973.00, 1, 3.27, 1.00, 4.32
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 10, 0.07, 287800581.00, 939655430.00, 2101790.00, 172.00, 913.00, 65015405.00, 1, 3.26, 1.00, 4.43
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 10, 0.00, 7170435.00, 18896788.00, 2098160.00, 25.00, 358.00, 1723214.00, 1, 2.64, 1.00, 4.16
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 50, 0.07, 287854231.00, 939647118.00, 2101995.00, 175.00, 909.00, 67587460.00, 1, 3.26, 1.00, 4.26
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 50, 0.07, 287958053.00, 939643469.00, 2101858.00, 81.00, 872.00, 66568401.00, 1, 3.26, 1.00, 4.33
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 50, 0.00, 7190618.00, 18896764.00, 2098075.00, 8.00, 274.00, 1726542.00, 1, 2.63, 1.00, 4.16
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 90, 0.07, 287768579.00, 939639849.00, 2101758.00, 32.00, 866.00, 66975877.00, 1, 3.27, 1.00, 4.30
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 90, 0.06, 287550772.00, 939635535.00, 2101408.00, 25.00, 779.00, 64607587.00, 1, 3.27, 1.00, 4.45
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 90, 0.00, 7308862.00, 18904281.00, 2098406.00, 24.00, 302.00, 1825409.00, 1, 2.59, 1.00, 4.00
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 99, 0.07, 287739603.00, 939648638.00, 2102023.00, 64.00, 1011.00, 66763662.00, 1, 3.27, 1.00, 4.31
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 99, 0.06, 287782839.00, 939637066.00, 2101378.00, 24.00, 828.00, 64074377.00, 1, 3.27, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 99, 0.00, 7120195.00, 18904233.00, 2098407.00, 14.00, 307.00, 1779185.00, 1, 2.66, 1.00, 4.00
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 1, 0.07, 291492978.00, 939676261.00, 16787623.00, 19164.00, 1657.00, 66813087.00, 1, 3.22, 1.00, 4.36
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 1, 0.07, 291257921.00, 939677404.00, 16788597.00, 1385.00, 2286.00, 66395047.00, 1, 3.23, 1.00, 4.39
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 1, 0.02, 65977353.00, 167856185.00, 16785407.00, 27.00, 2312.00, 15674289.00, 1, 2.54, 1.00, 4.21
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 10, 0.07, 291207280.00, 939672614.00, 16787532.00, 16.00, 1618.00, 65481997.00, 1, 3.23, 1.00, 4.45
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 10, 0.07, 291096892.00, 939675160.00, 16788553.00, 12.00, 2125.00, 67539328.00, 1, 3.23, 1.00, 4.31
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 10, 0.02, 67047079.00, 167856207.00, 16785389.00, 7.00, 2285.00, 15028130.00, 1, 2.50, 1.00, 4.46
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 50, 0.07, 291071616.00, 939672641.00, 16787555.00, 23.00, 1599.00, 67395253.00, 1, 3.23, 1.00, 4.32
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 50, 0.06, 291313468.00, 939679161.00, 16788604.00, 34.00, 2445.00, 64860159.00, 1, 3.23, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 50, 0.02, 66057946.00, 167857828.00, 16785466.00, 7.00, 2362.00, 16155568.00, 1, 2.54, 1.00, 4.09
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 90, 0.06, 291162005.00, 939676496.00, 16787697.00, 20.00, 1668.00, 64833842.00, 1, 3.23, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 90, 0.07, 291741105.00, 939795743.00, 16793996.00, 1445.00, 3812.00, 66927604.00, 1, 3.22, 1.00, 4.36
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 90, 0.02, 67541670.00, 168106513.00, 16795301.00, 1115.00, 3469.00, 15658681.00, 1, 2.49, 1.00, 4.31
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 99, 0.07, 291419705.00, 939722791.00, 16789983.00, 320.00, 2228.00, 67215480.00, 1, 3.22, 1.00, 4.34
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 99, 0.07, 291210544.00, 939674509.00, 16788492.00, 19.00, 2639.00, 65623097.00, 1, 3.23, 1.00, 4.44
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 99, 0.02, 66126737.00, 167856162.00, 16785401.00, 15.00, 2290.00, 16171263.00, 1, 2.54, 1.00, 4.09
```
- `-O2`:
```
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 1, 0.06, 257698663.00, 939628827.00, 2101514.00, 29362.00, 878.00, 57267736.00, 1, 3.65, 1.00, 4.50
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 1, 0.06, 257619479.00, 939631538.00, 2101497.00, 149.00, 876.00, 59978447.00, 1, 3.65, 1.00, 4.30
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 1, 0.00, 11235407.00, 18896506.00, 2098233.00, 31.00, 373.00, 2625342.00, 1, 1.68, 1.00, 4.28
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 10, 0.06, 257635474.00, 939637009.00, 2101738.00, 69.00, 845.00, 60311466.00, 1, 3.65, 1.00, 4.27
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 10, 0.06, 257479421.00, 939630322.00, 2101369.00, 52.00, 785.00, 58535684.00, 1, 3.65, 1.00, 4.40
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 10, 0.00, 11301605.00, 18903991.00, 2098555.00, 43.00, 542.00, 2660121.00, 1, 1.67, 1.00, 4.25
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 50, 0.06, 257589881.00, 939634320.00, 2101539.00, 51.00, 874.00, 60225943.00, 1, 3.65, 1.00, 4.28
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 50, 0.06, 257629142.00, 939632597.00, 2101384.00, 38.00, 797.00, 57373909.00, 1, 3.65, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 50, 0.00, 10925578.00, 18904045.00, 2098542.00, 17.00, 405.00, 2707304.00, 1, 1.73, 1.00, 4.04
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 90, 0.06, 257659199.00, 939633177.00, 2101532.00, 24.00, 829.00, 58563195.00, 1, 3.65, 1.00, 4.40
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 90, 0.06, 257458904.00, 939626627.00, 2101204.00, 16.00, 751.00, 58397800.00, 1, 3.65, 1.00, 4.41
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 90, 0.00, 10766972.00, 18896506.00, 2098255.00, 15.00, 358.00, 2668497.00, 1, 1.76, 1.00, 4.03
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8, 99, 0.06, 257608222.00, 939626854.00, 2101261.00, 36.00, 728.00, 58701274.00, 1, 3.65, 1.00, 4.39
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8BrFree, 99, 0.06, 257316207.00, 939628965.00, 2101279.00, 66.00, 882.00, 58865193.00, 1, 3.65, 1.00, 4.37
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count8SIMD, 99, 0.00, 11226295.00, 18896489.00, 2098234.00, 7.00, 360.00, 2623890.00, 1, 1.68, 1.00, 4.28
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 1, 0.06, 274453210.00, 939886928.00, 16798913.00, 21284.00, 4585.00, 64520744.00, 1, 3.42, 1.00, 4.25
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 1, 0.06, 273671301.00, 939666792.00, 16789239.00, 549.00, 2587.00, 60938102.00, 1, 3.43, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 1, 0.02, 78031836.00, 151078354.00, 16785328.00, 48.00, 1285.00, 18431284.00, 1, 1.94, 1.00, 4.23
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 10, 0.06, 273582569.00, 939661699.00, 16788129.00, 87.00, 2590.00, 61939133.00, 1, 3.43, 1.00, 4.42
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 10, 0.06, 273395829.00, 939659073.00, 16789066.00, 67.00, 2528.00, 63699046.00, 1, 3.44, 1.00, 4.29
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 10, 0.02, 77870877.00, 151075372.00, 16785301.00, 20.00, 1257.00, 18375350.00, 1, 1.94, 1.00, 4.24
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 50, 0.07, 273835495.00, 939768240.00, 16793215.00, 414.00, 3419.00, 65133328.00, 1, 3.43, 1.00, 4.20
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 50, 0.06, 273657706.00, 939670332.00, 16789396.00, 54.00, 2619.00, 60936018.00, 1, 3.43, 1.00, 4.49
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 50, 0.02, 76236858.00, 151083471.00, 16785585.00, 13.00, 1304.00, 18636876.00, 1, 1.98, 1.00, 4.09
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 90, 0.06, 273597355.00, 939665698.00, 16788226.00, 77.00, 2611.00, 62100741.00, 1, 3.43, 1.00, 4.41
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 90, 0.06, 273525210.00, 939671119.00, 16789468.00, 40.00, 2672.00, 63595816.00, 1, 3.44, 1.00, 4.30
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 90, 0.02, 78198494.00, 151082048.00, 16785548.00, 53.00, 1309.00, 18416917.00, 1, 1.93, 1.00, 4.25
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64, 99, 0.06, 273549464.00, 939659512.00, 16788050.00, 40.00, 2547.00, 62205638.00, 1, 3.44, 1.00, 4.40
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64BrFree, 99, 0.06, 273578885.00, 939659786.00, 16789064.00, 55.00, 2569.00, 62368798.00, 1, 3.43, 1.00, 4.39
name, selectivity, time sec, cycles, instructions, L1-misses, LLC-misses, branch-misses, task-clock, scale, IPC, CPUs, GHz
count64SIMD, 99, 0.02, 77774420.00, 151074873.00, 16785304.00, 16.00, 1258.00, 18542856.00, 1, 1.94, 1.00, 4.19
```
```
!pip install wandb
!wandb login
import pickle
from sklearn.model_selection import train_test_split
from spacy.lang.en import English
import random
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pathlib import Path
import time
import wandb
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/Colab Notebooks/NLP/project/
# %mkdir NYTData
# !wget https://www.dropbox.com/sh/xu9tu5hmjhuddwk/AAD31tK6oEoGlhpRZzeu3Y3Ya/NYTcorpus_train.p.gz?dl=1 --directory-prefix=NYTData
# !gunzip -c NYTData/NYTcorpus_train.p.gz\?dl\=1 > NYTData/NYTcorpus_train.p
# %mkdir vector_cache
# !wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip --directory-prefix=vector_cache
# !unzip vector_cache/wiki-news-300d-1M.vec.zip -d vector_cache/
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
VOCAB_PREFIX = [PAD, UNK]
VEC_PATH = Path('vector_cache') / 'wiki-news-300d-1M.vec'
MAX_VOCAB = 25000
batch_size = 64
validation_split = .3
shuffle_dataset = True
random_seed = 42
class BaseVocab:
def __init__(self, data, lower=False):
self.data = data
self.lower = lower
self.build_vocab()
def normalize_unit(self, unit):
if self.lower:
return unit.lower()
else:
return unit
def unit2id(self, unit):
unit = self.normalize_unit(unit)
if unit in self._unit2id:
return self._unit2id[unit]
else:
return self._unit2id[UNK]
def id2unit(self, id):
return self._id2unit[id]
def map(self, units):
return [self.unit2id(unit) for unit in units]
def build_vocab(self):
raise NotImplementedError()
def __len__(self):
return len(self._unit2id)
class PretrainedWordVocab(BaseVocab):
def build_vocab(self):
self._id2unit = VOCAB_PREFIX + self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class LabelVocab(BaseVocab):
def build_vocab(self):
self._id2unit = self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class Pretrain:
def __init__(self, vec_filename, max_vocab=-1):
self._vec_filename = vec_filename
self._max_vocab = max_vocab
@property
def vocab(self):
if not hasattr(self, '_vocab'):
self._vocab, self._emb = self.read()
return self._vocab
@property
def emb(self):
if not hasattr(self, '_emb'):
self._vocab, self._emb = self.read()
return self._emb
def read(self):
if self._vec_filename is None:
raise Exception("Vector file is not provided.")
print(f"Reading pretrained vectors from {self._vec_filename}...")
words, emb, failed = self.read_from_file(self._vec_filename, open_func=open)
if failed > 0: # recover failure
emb = emb[:-failed]
if len(emb) - len(VOCAB_PREFIX) != len(words):
raise Exception("Loaded number of vectors does not match number of words.")
# Use a fixed vocab size
if self._max_vocab > len(VOCAB_PREFIX) and self._max_vocab < len(words):
words = words[:self._max_vocab - len(VOCAB_PREFIX)]
emb = emb[:self._max_vocab]
vocab = PretrainedWordVocab(words, lower=True)
print("Done Reading")
return vocab, emb
def read_from_file(self, filename, open_func=open):
"""
Open a vector file using the provided function and read from it.
"""
first = True
words = []
failed = 0
with open_func(filename, 'rb') as f:
for i, line in enumerate(f):
try:
line = line.decode()
except UnicodeDecodeError:
failed += 1
continue
if first:
# the first line contains the number of word vectors and the dimensionality
first = False
line = line.strip().split(' ')
rows, cols = [int(x) for x in line]
emb = np.zeros((rows + len(VOCAB_PREFIX), cols), dtype=np.float32)
continue
line = line.rstrip().split(' ')
emb[i+len(VOCAB_PREFIX)-1-failed, :] = [float(x) for x in line[-cols:]]
words.append(' '.join(line[:-cols]))
return words, emb, failed
class NYTDataSet(Dataset):
def __init__(self, vectorized_data):
self.data = vectorized_data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def pad_sequences(batch):
max_len = max([len(x[0]) for x in batch])
padded_sequences = torch.zeros((len(batch), max_len), dtype=torch.long)
labels = torch.zeros((len(batch), len(batch[0][1])), dtype=torch.float)
for i, sample in enumerate(batch):
padded_sequences[i, :len(sample[0])] = sample[0]
labels[i, :] = sample[1]
padded_sequences = padded_sequences.to(device)
labels = labels.to(device)
return padded_sequences, labels
pretrain = Pretrain(VEC_PATH, MAX_VOCAB)
nlp = English()
tokenizer = nlp.tokenizer
# Check if we are running on a CPU or GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
%%time
vectorized_train_data = torch.load('vectorized_10000_train.pt')
train_data = NYTDataSet(vectorized_data=vectorized_train_data)
n_training_samples=len(train_data)
n_training_samples
%%time
vectorized_test_data = torch.load('vectorized_10000_test.pt')
test_data = NYTDataSet(vectorized_data=vectorized_test_data)
print(len(test_data))
# Creating data indices for training and validation splits:
dataset_size = len(train_data)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, collate_fn=pad_sequences)
validation_loader = DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, collate_fn=pad_sequences)
test_loader = DataLoader(test_data, batch_size=batch_size, collate_fn=pad_sequences)
class CNN(nn.Module):
def __init__(self, pretrain, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim,
dropout, pad_idx):
super().__init__()
self.embedding = nn.Embedding.from_pretrained(
torch.from_numpy(pretrain.emb),
padding_idx=pad_idx,
freeze=True
)
self.convs = nn.ModuleList([
nn.Conv2d(in_channels = 1,
out_channels = n_filters,
kernel_size = (fs, embedding_dim))
for fs in filter_sizes
])
self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text):
#text = [batch size, sent len]
embedded = self.embedding(text)
#embedded = [batch size, sent len, emb dim]
embedded = embedded.unsqueeze(1)
#embedded = [batch size, 1, sent len, emb dim]
conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
#conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]
pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]
#pooled_n = [batch size, n_filters]
cat = self.dropout(torch.cat(pooled, dim = 1))
#cat = [batch size, n_filters * len(filter_sizes)]
return self.fc(cat)
INPUT_DIM = len(pretrain.vocab)
EMBEDDING_DIM = pretrain.emb.shape[1]
N_FILTERS = 100
FILTER_SIZES = [3,4,5,6]
OUTPUT_DIM = 538
DROPOUT = 0.6
model = CNN(pretrain, INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_ID)
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
run = wandb.init(
entity='ut-mit-news-classify',
project="NYT Multilabeling",
)
# Magic
wandb.watch(model)
def multi_label_accuracy(preds, y):
    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    # the element-wise ratio encodes the confusion matrix:
    # 1/1 -> 1 (true positive), 1/0 -> inf (false positive),
    # 0/1 -> 0 (false negative), 0/0 -> nan (true negative)
    confusion_vector = rounded_preds / y
true_positives = torch.sum(confusion_vector==1)
false_positives = torch.sum(torch.isinf(confusion_vector))
false_negatives = torch.sum(confusion_vector==0)
true_negatives = torch.sum(torch.isnan(confusion_vector))
accuracy = (true_positives + true_negatives) / (true_positives + false_positives + false_negatives + true_negatives)
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
f_score = (2 * precision * recall) / (precision + recall)
return accuracy, precision, recall, f_score
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
epoch_precision = 0
epoch_recall = 0
epoch_f_score = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch[0]).squeeze(1)
loss = criterion(predictions, batch[1])
acc, precision, recall, f_score = multi_label_accuracy(predictions, batch[1])
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
epoch_precision += precision.item()
epoch_recall += recall.item()
epoch_f_score += f_score.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator), \
epoch_precision / len(iterator), epoch_recall / len(iterator), \
epoch_f_score / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
epoch_precision = 0
epoch_recall = 0
epoch_f_score = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch[0]).squeeze(1)
loss = criterion(predictions, batch[1])
acc, precision, recall, f_score = multi_label_accuracy(predictions, batch[1])
epoch_loss += loss.item()
epoch_acc += acc.item()
epoch_precision += precision.item()
epoch_recall += recall.item()
epoch_f_score += f_score.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator), \
epoch_precision / len(iterator), epoch_recall / len(iterator), \
epoch_f_score / len(iterator)
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
%%time
patience = 7
epochs_of_no_improvement = 0
best_valid_loss = float('inf')
wandb.config.early_stopping_patience = patience
wandb.config.training_samples=n_training_samples
model_file_name = f'nyt_cnn_classifier_trained_with_{n_training_samples}_samples.pt'
epoch = 0
while True:
start_time = time.time()
train_loss, train_acc, train_precision, train_recall, train_f_score \
= train(model, train_loader, optimizer, criterion)
valid_loss, valid_acc, valid_precision, valid_recall, valid_f_score \
= evaluate(model, validation_loader, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
print(f'New validation loss {valid_loss} is better than the best validation loss {best_valid_loss} so far.')
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_file_name)
epochs_of_no_improvement = 0
else:
epochs_of_no_improvement += 1
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | ' +
f'Train Precision: {train_precision*100:.2f}% | Train Recall: {train_recall*100:.2f}% | ' +
f'Train F1-score: {train_f_score*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}% | ' +
f'Val. Precision: {valid_precision*100:.2f}% | Val. Recall: {valid_recall*100:.2f}% | ' +
f'Val. F1-score: {valid_f_score*100:.2f}%')
wandb.log({"train_loss": train_loss,
"train_precision": train_precision,
"train_f_score": train_f_score,
"train_acc": train_acc,
"train_recall": train_recall,
"valid_loss": valid_loss,
"valid_acc": valid_acc,
"valid_precision": valid_precision,
"valid_recall": valid_recall,
"valid_f_score": valid_f_score
})
# check if the training should be stopped and then stop the training
if epochs_of_no_improvement == patience :
print(f'Early stopping, on epoch: {epoch+1}.')
break
epoch += 1
n_training_samples
model = CNN(pretrain, INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_ID)
model.load_state_dict(torch.load(model_file_name))
model = model.to(device)
start_time = time.time()
test_loss, test_acc, test_precision, test_recall, test_f_score \
= evaluate(model, test_loader, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: test | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTest Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | ' +
f'Test Precision: {test_precision*100:.2f}% | Test Recall: {test_recall*100:.2f}% | ' +
f'Test F1-score: {test_f_score*100:.2f}%')
wandb.log({"test_acc": test_acc,
"test_precision": test_precision,
"test_recall": test_recall,
"test_f_score": test_f_score
})
run.finish()
```
|
github_jupyter
|
!pip install wandb
!wandb login
import pickle
from sklearn.model_selection import train_test_split
from spacy.lang.en import English
import random
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pathlib import Path
import time
import wandb
from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/MyDrive/Colab Notebooks/NLP/project/
# %mkdir NYTData
# !wget https://www.dropbox.com/sh/xu9tu5hmjhuddwk/AAD31tK6oEoGlhpRZzeu3Y3Ya/NYTcorpus_train.p.gz?dl=1 --directory-prefix=NYTData
# !gunzip -c NYTData/NYTcorpus_train.p.gz\?dl\=1 > NYTData/NYTcorpus_train.p
# %mkdir vector_cache
# !wget https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip --directory-prefix=vector_cache
# !unzip vector_cache/wiki-news-300d-1M.vec.zip -d vector_cache/
PAD = '<PAD>'
PAD_ID = 0
UNK = '<UNK>'
UNK_ID = 1
VOCAB_PREFIX = [PAD, UNK]
VEC_PATH = Path('vector_cache') / 'wiki-news-300d-1M.vec'
MAX_VOCAB = 25000
batch_size = 64
validation_split = .3
shuffle_dataset = True
random_seed = 42
class BaseVocab:
def __init__(self, data, lower=False):
self.data = data
self.lower = lower
self.build_vocab()
def normalize_unit(self, unit):
if self.lower:
return unit.lower()
else:
return unit
def unit2id(self, unit):
unit = self.normalize_unit(unit)
if unit in self._unit2id:
return self._unit2id[unit]
else:
return self._unit2id[UNK]
def id2unit(self, id):
return self._id2unit[id]
def map(self, units):
return [self.unit2id(unit) for unit in units]
def build_vocab(self):
NotImplementedError()
def __len__(self):
return len(self._unit2id)
class PretrainedWordVocab(BaseVocab):
def build_vocab(self):
self._id2unit = VOCAB_PREFIX + self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class LabelVocab(BaseVocab):
def build_vocab(self):
self._id2unit = self.data
self._unit2id = {w:i for i, w in enumerate(self._id2unit)}
class Pretrain:
def __init__(self, vec_filename, max_vocab=-1):
self._vec_filename = vec_filename
self._max_vocab = max_vocab
@property
def vocab(self):
if not hasattr(self, '_vocab'):
self._vocab, self._emb = self.read()
return self._vocab
@property
def emb(self):
if not hasattr(self, '_emb'):
self._vocab, self._emb = self.read()
return self._emb
def read(self):
if self._vec_filename is None:
raise Exception("Vector file is not provided.")
print(f"Reading pretrained vectors from {self._vec_filename}...")
words, emb, failed = self.read_from_file(self._vec_filename, open_func=open)
if failed > 0: # recover failure
emb = emb[:-failed]
if len(emb) - len(VOCAB_PREFIX) != len(words):
raise Exception("Loaded number of vectors does not match number of words.")
# Use a fixed vocab size
if self._max_vocab > len(VOCAB_PREFIX) and self._max_vocab < len(words):
words = words[:self._max_vocab - len(VOCAB_PREFIX)]
emb = emb[:self._max_vocab]
vocab = PretrainedWordVocab(words, lower=True)
print("Done Reading")
return vocab, emb
def read_from_file(self, filename, open_func=open):
"""
Open a vector file using the provided function and read from it.
"""
first = True
words = []
failed = 0
with open_func(filename, 'rb') as f:
for i, line in enumerate(f):
try:
line = line.decode()
except UnicodeDecodeError:
failed += 1
continue
if first:
# the first line contains the number of word vectors and the dimensionality
first = False
line = line.strip().split(' ')
rows, cols = [int(x) for x in line]
emb = np.zeros((rows + len(VOCAB_PREFIX), cols), dtype=np.float32)
continue
line = line.rstrip().split(' ')
emb[i+len(VOCAB_PREFIX)-1-failed, :] = [float(x) for x in line[-cols:]]
words.append(' '.join(line[:-cols]))
return words, emb, failed
class NYTDataSet(Dataset):
def __init__(self, vectorized_data):
self.data = vectorized_data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def pad_sequences(batch):
max_len = max([len(x[0]) for x in batch])
padded_sequences = torch.zeros((len(batch), max_len), dtype=torch.long)
labels = torch.zeros((len(batch), len(batch[0][1])), dtype=torch.float)
for i, sample in enumerate(batch):
padded_sequences[i, :len(sample[0])] = sample[0]
labels[i, :] = sample[1]
padded_sequences = padded_sequences.to(device)
labels = labels.to(device)
return padded_sequences, labels
pretrain = Pretrain(VEC_PATH, MAX_VOCAB)
nlp = English()
tokenizer = nlp.tokenizer
# Check if we are running on a CPU or GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
%%time
vectorized_train_data = torch.load('vectorized_10000_train.pt')
train_data = NYTDataSet(vectorized_data=vectorized_train_data)
n_training_samples=len(train_data)
n_training_samples
%%time
vectorized_test_data = torch.load('vectorized_10000_test.pt')
test_data = NYTDataSet(vectorized_data=vectorized_test_data)
print(len(test_data))
# Creating data indices for training and validation splits:
dataset_size = len(train_data)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
valid_sampler = SubsetRandomSampler(val_indices)
train_loader = DataLoader(train_data, batch_size=batch_size, sampler=train_sampler, collate_fn=pad_sequences)
validation_loader = DataLoader(train_data, batch_size=batch_size, sampler=valid_sampler, collate_fn=pad_sequences)
test_loader = DataLoader(test_data, batch_size=batch_size, collate_fn=pad_sequences)
class CNN(nn.Module):
def __init__(self, pretrain, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim,
dropout, pad_idx):
super().__init__()
self.embedding = nn.Embedding.from_pretrained(
torch.from_numpy(pretrain.emb),
padding_idx=pad_idx,
freeze=True
)
self.convs = nn.ModuleList([
nn.Conv2d(in_channels = 1,
out_channels = n_filters,
kernel_size = (fs, embedding_dim))
for fs in filter_sizes
])
self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text):
#text = [batch size, sent len]
embedded = self.embedding(text)
#embedded = [batch size, sent len, emb dim]
embedded = embedded.unsqueeze(1)
#embedded = [batch size, 1, sent len, emb dim]
conved = [F.relu(conv(embedded)).squeeze(3) for conv in self.convs]
#conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]
pooled = [F.max_pool1d(conv, conv.shape[2]).squeeze(2) for conv in conved]
#pooled_n = [batch size, n_filters]
cat = self.dropout(torch.cat(pooled, dim = 1))
#cat = [batch size, n_filters * len(filter_sizes)]
return self.fc(cat)
INPUT_DIM = len(pretrain.vocab)
EMBEDDING_DIM = pretrain.emb.shape[1]
N_FILTERS = 100
FILTER_SIZES = [3,4,5,6]
OUTPUT_DIM = 538
DROPOUT = 0.6
model = CNN(pretrain, INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_ID)
optimizer = optim.Adam(model.parameters())
criterion = nn.BCEWithLogitsLoss()
model = model.to(device)
criterion = criterion.to(device)
run = wandb.init(
entity='ut-mit-news-classify',
project="NYT Multilabeling",
)
# Magic
wandb.watch(model)
def multi_label_accuracy(preds, y):
    #round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    # the element-wise ratio encodes the confusion matrix:
    # 1/1 -> 1 (true positive), 1/0 -> inf (false positive),
    # 0/1 -> 0 (false negative), 0/0 -> nan (true negative)
    confusion_vector = rounded_preds / y
true_positives = torch.sum(confusion_vector==1)
false_positives = torch.sum(torch.isinf(confusion_vector))
false_negatives = torch.sum(confusion_vector==0)
true_negatives = torch.sum(torch.isnan(confusion_vector))
accuracy = (true_positives + true_negatives) / (true_positives + false_positives + false_negatives + true_negatives)
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
f_score = (2 * precision * recall) / (precision + recall)
return accuracy, precision, recall, f_score
def train(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
epoch_precision = 0
epoch_recall = 0
epoch_f_score = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch[0]).squeeze(1)
loss = criterion(predictions, batch[1])
acc, precision, recall, f_score = multi_label_accuracy(predictions, batch[1])
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
epoch_precision += precision.item()
epoch_recall += recall.item()
epoch_f_score += f_score.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator), \
epoch_precision / len(iterator), epoch_recall / len(iterator), \
epoch_f_score / len(iterator)
def evaluate(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
epoch_precision = 0
epoch_recall = 0
epoch_f_score = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch[0]).squeeze(1)
loss = criterion(predictions, batch[1])
acc, precision, recall, f_score = multi_label_accuracy(predictions, batch[1])
epoch_loss += loss.item()
epoch_acc += acc.item()
epoch_precision += precision.item()
epoch_recall += recall.item()
epoch_f_score += f_score.item()
return epoch_loss / len(iterator), epoch_acc / len(iterator), \
epoch_precision / len(iterator), epoch_recall / len(iterator), \
epoch_f_score / len(iterator)
import time
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
%%time
patience = 7
epochs_of_no_improvement = 0
best_valid_loss = float('inf')
wandb.config.early_stopping_patience = patience
wandb.config.training_samples=n_training_samples
model_file_name = f'nyt_cnn_classifier_trained_with_{n_training_samples}_samples.pt'
epoch = 0
while True:
start_time = time.time()
train_loss, train_acc, train_precision, train_recall, train_f_score \
= train(model, train_loader, optimizer, criterion)
valid_loss, valid_acc, valid_precision, valid_recall, valid_f_score \
= evaluate(model, validation_loader, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
print(f'New validation loss {valid_loss} is better than the best validation loss {best_valid_loss} so far.')
best_valid_loss = valid_loss
torch.save(model.state_dict(), model_file_name)
epochs_of_no_improvement = 0
else:
epochs_of_no_improvement += 1
print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}% | ' +
f'Train Precision: {train_precision*100:.2f}% | Train Recall: {train_recall*100:.2f}% | ' +
f'Train F1-score: {train_f_score*100:.2f}%')
print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}% | ' +
f'Val. Precision: {valid_precision*100:.2f}% | Val. Recall: {valid_recall*100:.2f}% | ' +
f'Val. F1-score: {valid_f_score*100:.2f}%')
wandb.log({"train_loss": train_loss,
"train_precision": train_precision,
"train_f_score": train_f_score,
"train_acc": train_acc,
"train_recall": train_recall,
"valid_loss": valid_loss,
"valid_acc": valid_acc,
"valid_precision": valid_precision,
"valid_recall": valid_recall,
"valid_f_score": valid_f_score
})
# check if the training should be stopped and then stop the training
if epochs_of_no_improvement == patience :
print(f'Early stopping, on epoch: {epoch+1}.')
break
epoch += 1
n_training_samples
model = CNN(pretrain, INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT, PAD_ID)
model.load_state_dict(torch.load(model_file_name))
model = model.to(device)
start_time = time.time()
test_loss, test_acc, test_precision, test_recall, test_f_score \
= evaluate(model, test_loader, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
print(f'Epoch: test | Epoch Time: {epoch_mins}m {epoch_secs}s')
print(f'\tTest Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | ' +
f'Test Precision: {test_precision*100:.2f}% | Test Recall: {test_recall*100:.2f}% | ' +
f'Test F1-score: {test_f_score*100:.2f}%')
wandb.log({"test_acc": test_acc,
"test_precision": test_precision,
"test_recall": test_recall,
"test_f_score": test_f_score
})
run.finish()
| 0.610453 | 0.289303 |
<a href="https://colab.research.google.com/github/SamH3pn3r/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/Copy_of_LS_DS_224_Sequence_your_narrative.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
_Lambda School Data Science_
# Sequence your narrative
Today we will create a sequence of visualizations inspired by [Hans Rosling's 200 Countries, 200 Years, 4 Minutes](https://www.youtube.com/watch?v=jbkSRLYSojo).
Using this [data from Gapminder](https://github.com/open-numbers/ddf--gapminder--systema_globalis/):
- [Income Per Person (GDP Per Capital, Inflation Adjusted) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv)
- [Life Expectancy (in Years) by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv)
- [Population Totals, by Geo & Time](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)
- [Entities](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv)
- [Concepts](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv)
Objectives
- sequence multiple visualizations
- combine qualitative anecdotes with quantitative aggregates
Links
- [Hans Rosling’s TED talks](https://www.ted.com/speakers/hans_rosling)
- [Spiralling global temperatures from 1850-2016](https://twitter.com/ed_hawkins/status/729753441459945474)
- "[The Pudding](https://pudding.cool/) explains ideas debated in culture with visual essays."
- [A Data Point Walks Into a Bar](https://lisacharlotterost.github.io/2016/12/27/datapoint-in-bar/): a thoughtful blog post about emotion and empathy in data storytelling
## Make a plan
#### How to present the data?
Variables --> Visual Encodings
- Income --> x
- Lifespan --> y
- Region --> color
- Population --> size
- Year --> animation frame (alternative: small multiple)
- Country --> annotation
Qualitative --> Verbal
- Editorial / contextual explanation --> audio narration (alternative: text)
#### How to structure the data?
| Year | Country | Region | Income | Lifespan | Population |
|------|---------|----------|--------|----------|------------|
| 1818 | USA | Americas | ### | ## | # |
| 1918 | USA | Americas | #### | ### | ## |
| 2018 | USA | Americas | ##### | ### | ### |
| 1818 | China | Asia | # | # | # |
| 1918 | China | Asia | ## | ## | ### |
| 2018 | China | Asia | ### | ### | ##### |
## Upgrade Seaborn
Make sure you have at least version 0.9.0.
In Colab, go to **Restart runtime** after you run the `pip` command.
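The upgrade command itself isn't shown in this notebook; a minimal version (an assumption based on the 0.9.0 requirement above) would be:
```
!pip install --upgrade seaborn
```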
```
import seaborn as sns
sns.__version__
```
## More imports
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
```
## Load & look at data
```
income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')
population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')
income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape
income.head()
lifespan.head()
population.head()
pd.options.display.max_columns = 500
entities.head()
concepts.head()
```
## Merge data
https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf
```
df = pd.merge(income,lifespan)
df.shape
more_df = pd.merge(df, population)
more_df.shape
entities_variables = ['country', 'name', 'world_6region']
entities = entities[entities_variables]
print(entities.shape)
entities.head()
mas_df = pd.merge(entities, more_df, left_on='country', right_on='geo')
print(mas_df.shape)
mas_df.head()
merged_df = mas_df.drop(['country','geo'], axis=1)
munge_df = merged_df.rename(index=str, columns = {
'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income_per_person',
'time': 'year',
'life_expectancy_years': 'life_expect',
'population_total': 'population',
'world_6region': 'region',
'name': 'country'})
munge_df.head()
munge_df.dtypes
munge_df.describe(exclude='number')
```
## Explore data
```
munge_df.country.unique()
munge_df.region.unique()
usa = munge_df[munge_df.country=='United States']
usa.head()
usa[usa.year.isin([1818, 1918, 2018])]
china = munge_df[munge_df.country=='China']
china[china.year.isin([1818,1918,2018])]
```
## Plot visualization
```
years = [2018]
now = munge_df[munge_df.year.isin(years)]
now
fig = plt.figure();
ax = sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=now);
#axe = plt.axes()
ax.set_axis_labels("Income Per Person(USD)", "Life Expectancy");
plt.xlim((0,105000));
plt.ylim((0,90));
ax.fig.text(x=0.05, y=1.055, s="A Comparison Between Regions By Life Expectancy and Income", fontsize= 12, weight='bold');
ax.fig.text(x=.25, y=1, s="Using data from gapminder", fontsize=10);
#x_ticks = [10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000]
#ax.set(xticks = x_ticks)
#sns.set(rc={"xtick.bottom" : True, "ytick.left" : True})
#ax[1, 1].xaxis.set_minor_locator(minorLocator)  # disabled: minorLocator is never defined
plt.show();
#My practice
countries = ['China', 'India']
years = [1818, 1838, 1858, 1878, 1898, 1918, 1938, 1958, 1978, 1998, 2018]
country_prog = munge_df[(munge_df.country.isin(countries)) & (munge_df.year.isin(years))]
ax = sns.relplot(x='income_per_person', y='life_expect', hue='country',
size='population', data=country_prog);
ax.set_axis_labels("Income Per Person(USD)", "Life Expectancy")
sns.set_style('whitegrid')
sns.despine()
plt.xlim((0,5000))
plt.ylim((0,90))
plt.title("A comparison of life expectancy and income per person by country/continent", fontsize=10)
plt.suptitle("Using data from gapminder", fontsize=10)
plt.show(ax);
qatar = munge_df[munge_df.income_per_person > 80000].sort_values(by='income_per_person')
qatar
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=now);
plt.text(x=qatar.income_per_person-5000, y = qatar.life_expect+1, s='Qatar')
years = [1818, 1918, 2018]
centuries = munge_df[munge_df.year.isin(years)]
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', col='year', data=centuries);
plt.xscale('log');
scores = [1918, 1938, 1958, 1978, 1998, 2018]
decades = munge_df[munge_df.year.isin(scores)]
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population',col='year', data=decades);
for year in scores:
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150,150000))
plt.ylim((0,90))
plt.title(year)
plt.axvline(x=1000, color='yellow')
munge_df[(munge_df.year==1918) & (munge_df.life_expect > 50)]
munge_df[(munge_df.year==2018) & (munge_df.life_expect < 60)]
```
## Analyze outliers
```
#did that
```
## Plot multiple years
```
#did that
```
## Point out a story
```
year = 2018 #@param {type:"slider", min:1800, max:2018, step:1}
sns.relplot(x='income_per_person', y='life_expect', hue='region', size='population',
data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150, 150000))
plt.ylim((0,90))
plt.title(year);
colors = ['windows blue', 'amber', 'greyish', 'faded green', 'dusty purple']
sns.palplot(sns.xkcd_palette(colors));
from google.colab import widgets
tb = widgets.TabBar([str(year) for year in scores])
for tab, year in zip(tb,scores):
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150, 150000))
plt.ylim((0,90));
```
# ASSIGNMENT
Replicate the lesson code
# STRETCH OPTIONS
## 1. Animate!
- [Making animations work in Google Colaboratory](https://medium.com/lambda-school-machine-learning/making-animations-work-in-google-colaboratory-new-home-for-ml-prototyping-c6147186ae75)
- [How to Create Animated Graphs in Python](https://towardsdatascience.com/how-to-create-animated-graphs-in-python-bb619cc2dec1)
- [The Ultimate Day of Chicago Bikeshare](https://chrisluedtke.github.io/divvy-data.html) (Lambda School Data Science student)
## 2. Work on anything related to your portfolio site / project
|
github_jupyter
|
import seaborn as sns
sns.__version__
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
income = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--income_per_person_gdppercapita_ppp_inflation_adjusted--by--geo--time.csv')
lifespan = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--life_expectancy_years--by--geo--time.csv')
population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
entities = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
concepts = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--concepts.csv')
income.shape, lifespan.shape, population.shape, entities.shape, concepts.shape
income.head()
lifespan.head()
population.head()
pd.options.display.max_columns = 500
entities.head()
concepts.head()
df = pd.merge(income,lifespan)
df.shape
more_df = pd.merge(df, population)
more_df.shape
entities_variables = ['country', 'name', 'world_6region']
entities = entities[entities_variables]
print(entities.shape)
entities.head()
mas_df = pd.merge(entities, more_df, left_on='country', right_on='geo')
print(mas_df.shape)
mas_df.head()
merged_df = mas_df.drop(['country','geo'], axis=1)
munge_df = merged_df.rename(index=str, columns = {
'income_per_person_gdppercapita_ppp_inflation_adjusted': 'income_per_person',
'time': 'year',
'life_expectancy_years': 'life_expect',
'population_total': 'population',
'world_6region': 'region',
'name': 'country'})
munge_df.head()
munge_df.dtypes
munge_df.describe(exclude='number')
munge_df.country.unique()
munge_df.region.unique()
usa = munge_df[munge_df.country=='United States']
usa.head()
usa[usa.year.isin([1818, 1918, 2018])]
china = munge_df[munge_df.country=='China']
china[china.year.isin([1818,1918,2018])]
years = [2018]
now = munge_df[munge_df.year.isin(years)]
now
fig = plt.figure();
ax = sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=now);
#axe = plt.axes()
ax.set_axis_labels("Income Per Person(USD)", "Life Expectancy");
plt.xlim((0,105000));
plt.ylim((0,90));
ax.fig.text(x=0.05, y=1.055, s="A Comparison Between Regions By Life Expectancy and Income", fontsize= 12, weight='bold');
ax.fig.text(x=.25, y=1, s="Using data from gapminder", fontsize=10);
#x_ticks = [10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000]
#ax.set(xticks = x_ticks)
#sns.set(rc={"xtick.bottom" : True, "ytick.left" : True})
#ax[1, 1].xaxis.set_minor_locator(minorLocator)  # disabled: minorLocator is never defined
plt.show();
#My practice
countries = ['China', 'India']
years = [1818, 1838, 1858, 1878, 1898, 1918, 1938, 1958, 1978, 1998, 2018]
country_prog = munge_df[(munge_df.country.isin(countries)) & (munge_df.year.isin(years))]
ax = sns.relplot(x='income_per_person', y='life_expect', hue='country',
size='population', data=country_prog);
ax.set_axis_labels("Income Per Person(USD)", "Life Expectancy")
sns.set_style('whitegrid')
sns.despine()
plt.xlim((0,5000))
plt.ylim((0,90))
plt.title("A comparison of life expectancy and income per person by country/continent", fontsize=10)
plt.suptitle("Using data from gapminder", fontsize=10)
plt.show(ax);
qatar = munge_df[munge_df.income_per_person > 80000].sort_values(by='income_per_person')
qatar
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=now);
plt.text(x=qatar.income_per_person-5000, y = qatar.life_expect+1, s='Qatar')
years = [1818, 1918, 2018]
centuries = munge_df[munge_df.year.isin(years)]
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', col='year', data=centuries);
plt.xscale('log');
scores = [1918, 1938, 1958, 1978, 1998, 2018]
decades = munge_df[munge_df.year.isin(scores)]
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population',col='year', data=decades);
for year in scores:
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150,150000))
plt.ylim((0,90))
plt.title(year)
plt.axvline(x=1000, color='yellow')
munge_df[(munge_df.year==1918) & (munge_df.life_expect > 50)]
munge_df[(munge_df.year==2018) & (munge_df.life_expect < 60)]
#did that
#did that
year = 2018 #@param {type:"slider", min:1800, max:2018, step:1}
sns.relplot(x='income_per_person', y='life_expect', hue='region', size='population',
data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150, 150000))
plt.ylim((0,90))
plt.title(year);
colors = ['windows blue', 'amber', 'greyish', 'faded green', 'dusty purple']
sns.palplot(sns.xkcd_palette(colors));
from google.colab import widgets
tb = widgets.TabBar([str(year) for year in scores])
for tab, year in zip(tb,scores):
sns.relplot(x='income_per_person', y='life_expect', hue='region',
size='population', data=munge_df[munge_df.year==year])
plt.xscale('log')
plt.xlim((150, 150000))
plt.ylim((0,90));
| 0.530723 | 0.942929 |
Addresses can be converted to a specific format via the `output_format` parameter; the following keywords are supported. Any missing attributes are omitted.
* house_number: ('1234')
* street_prefix_abbr: ('N', 'S', 'E', or 'W')
* street_prefix_full: ('North', 'South', 'East', or 'West')
* street_name: ('Main')
* street_suffix_abbr: ('St', 'Ave')
* street_suffix_full: ('Street', 'Avenue')
* apartment: ('Apt 1')
* building: ('Staples Center')
* city: ('Los Angeles')
* state_abbr: ('CA')
* state_full: ('California')
* zipcode: ('57903')
The default `output_format` is "(building) house_number street_prefix_abbr street_name street_suffix_abbr, apartment,
city, state_abbr zipcode"
The `must_contain` parameter takes a tuple of address parts that must be present for the address to be successfully cleaned; the following keywords are supported.
* house_number: ('1234')
* street_prefix: ('N', 'North')
* street_name: ('Main')
* street_suffix: ('St', 'Avenue')
* apartment: ('Apt 1')
* building: ('Staples Center')
* city: ('Los Angeles')
* state: ('CA', 'California')
* zipcode: ('57903')
The default value for `must_contain` is `("house_number", "street_name")`. Therefore, by default addresses must contain a house number and street name to be successfully cleaned.
Invalid parsing is handled with the `errors` parameter (a short sketch follows the list below):
* "coerce" (default): invalid parsing will be set to NaN
* "ignore": invalid parsing will return the input
* "raise": invalid parsing will raise an exception
After cleaning, a **report** is printed that provides the following information:
* How many values were cleaned (the value must have been transformed).
* How many values could not be parsed.
* A summary of the cleaned data: how many values are in the correct format, and how many values are NaN.
The following sections demonstrate the functionality of `clean_address()` and `validate_address()`.
## An example dirty dataset
```
import pandas as pd
import numpy as np
df = pd.DataFrame(
{
"address": [
"123 Pine Ave.",
"main st",
"1234 west main heights 57033",
"apt 1 789 s maple rd manhattan",
"robie house, 789 north main street",
"1111 S Figueroa St, Los Angeles, CA 90015",
"(staples center) 1111 S Figueroa St, Los Angeles",
"hello",
np.nan,
"NULL"
]
}
)
df
```
## 1. Default `clean_address()`
By default, the `output_format` parameter is set to "(building) house_number street_prefix_abbr street_name street_suffix_abbr, apartment, city, state_abbr zipcode" and the `must_contain` parameter is set to `("house_number", "street_name")`. The `errors` parameter is set to "coerce" (invalid parsing is set to NaN).
```
from dataprep.clean import clean_address
clean_address(df, "address")
```
Note that "123 Pine Ave." is considered not cleaned in the report since its resulting format is the same as the input. Also, "main st" is invalid since it does not contain a house number.
## 2. Output formats
```
clean_address(
df,
"address",
output_format="(zipcode) street_prefix_full street_name ~state_full~"
)
clean_address(
df,
"address",
output_format="house_number street_name street_suffix_full (building)",
)
```
### Splitting The Output
A tab character can be placed between address keywords to split the output into separate columns. The column names are taken from the output format.
```
clean_address(
df,
"address",
output_format="house_number street_name \t state_full"
)
```
## 3. `must_contain` parameter
This parameter takes a tuple containing parts of the address that must be included for the address to be successfully cleaned.
```
clean_address(
df, "address", must_contain=("house_number", "zipcode")
)
```
## 4. `split` parameter
The `split` parameter adds individual columns containing the cleaned address values to the given DataFrame.
```
clean_address(df, "address", split=True)
```
Setting split to True is equivalent to placing tabs between each word in the `output_format` and removing all characters that are not part of an address keyword (ie. commas). Column names are taken from the address keywords in the `output_format`.
```
clean_address(
df,
"address",
split=True,
output_format="house_number, street_name, building"
)
```
## 5. `inplace` parameter
The `inplace` parameter deletes the given column from the returned DataFrame.
A new column containing the cleaned addresses is added with a title in the format `"{original title}_clean"`.
```
clean_address(df, "address", inplace=True)
```
### `inplace` and `split`
```
clean_address(df, "address", inplace=True, split=True)
```
## 6. `validate_address()`
`validate_address()` returns True when the input is a valid address value; otherwise it returns False. The address requirements are the same as for `clean_address()`.
```
from dataprep.clean import validate_address
print(validate_address("123 main st"))
print(validate_address("main st"))
print(validate_address("apt 1 s maple rd manhattan", must_contain=("apartment",)))
print(validate_address("(staples center) 1111 S Figueroa St, Los Angeles"))
print(validate_address("789 North Maple Way Boston, MA"))
```
### `validate_address()` on a pandas series
```
df["valid"] = validate_address(df["address"])
df
```
### `must_contain`
```
df["valid"] = validate_address(df["address"], must_contain=("building", "city"))
df
```
|
github_jupyter
|
import pandas as pd
import numpy as np
df = pd.DataFrame(
{
"address": [
"123 Pine Ave.",
"main st",
"1234 west main heights 57033",
"apt 1 789 s maple rd manhattan",
"robie house, 789 north main street",
"1111 S Figueroa St, Los Angeles, CA 90015",
"(staples center) 1111 S Figueroa St, Los Angeles",
"hello",
np.nan,
"NULL"
]
}
)
df
from dataprep.clean import clean_address
clean_address(df, "address")
clean_address(
df,
"address",
output_format="(zipcode) street_prefix_full street_name ~state_full~"
)
clean_address(
df,
"address",
output_format="house_number street_name street_suffix_full (building)",
)
clean_address(
df,
"address",
output_format="house_number street_name \t state_full"
)
clean_address(
df, "address", must_contain=("house_number", "zipcode")
)
clean_address(df, "address", split=True)
clean_address(
df,
"address",
split=True,
output_format="house_number, street_name, building"
)
clean_address(df, "address", inplace=True)
clean_address(df, "address", inplace=True, split=True)
from dataprep.clean import validate_address
print(validate_address("123 main st"))
print(validate_address("main st"))
print(validate_address("apt 1 s maple rd manhattan", must_contain=("apartment",)))
print(validate_address("(staples center) 1111 S Figueroa St, Los Angeles"))
print(validate_address("789 North Maple Way Boston, MA"))
df["valid"] = validate_address(df["address"])
df
df["valid"] = validate_address(df["address"], must_contain=("building", "city"))
df
| 0.516595 | 0.860545 |
# Cognitive Hackathon: Overview
This is a four-week course on cognitive computing with labs and a student project.
Here are the topics by week:
**[Week 1](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%201/README.ipynb)** Introduce cognitive computing and the development environment
**[Week 2](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%202/README.ipynb)** Students brainstorm their project concept
**[Week 3](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%203/README.ipynb)** Students implement their project concept
**[Week 4](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%204/README.ipynb)** Project Conclusions, Discussion, and Data Presentation
**[Suggested Rubric](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Rubric.ipynb)** If you're planning on grading this project, this is a high-level, suggested rubric for this project.
Here is a detailed breakdown of the four course weeks:
## [Week 1](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%201/README.ipynb)
Introduce cognitive computing and the development environment for the course. Azure Notebooks will be presented and worked with in a hands-on lab. Python fundamentals will be presented as well, along with a lab. Real-world Artificial Intelligence (AI) examples will be presented.
## [Week 2](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%202/README.ipynb)
Firsthand cognitive examples will be presented. The course project begins: Students will brainstorm their project concept and choose an idea. They will group into teams to choose specific ideas to develop. Requirements, applications, and technical considerations will be discussed. Live Python code example of spell check cognitive service will be presented.
## [Week 3](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%203/README.ipynb)
Teacher demonstrates Cognitive API code examples to prepare students for Computer Vision and Text Analytics Labs. Students will implement their cognitive concept in the course project and form conclusions.
## [Week 4](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%204/README.ipynb)
Students will present their Project Conclusions. They will discuss considerations with data and apply these to their project. Additional cognitive services may be experimented with.
To get started, clone this Azure Notebook:
<img width="1346" alt="Screen Shot 2019-04-24 at 5 33 31 AM" src="https://user-images.githubusercontent.com/1314285/56659754-c6ca2a00-6652-11e9-9a23-377baf2adb33.png">
|
github_jupyter
|
# Cognitive Hackathon: Overview
This is a four-week course on cognitive computing with labs and a student project.
Here are the topics by week:
**[Week 1](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%201/README.ipynb)** Introduce cognitive computing and the development environment
**[Week 2](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%202/README.ipynb)** Students brainstorm their project concept
**[Week 3](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%203/README.ipynb)** Students implement their project concept
**[Week 4](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%204/README.ipynb)** Project Conclusions, Discussion, and Data Presentation
**[Suggested Rubric](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Rubric.ipynb)** If you're planning on grading this project, this is a high-level, suggested rubric for this project.
Here is a detailed breakdown of the four course weeks:
## [Week 1](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%201/README.ipynb)
Introduce cognitive computing and the development environment for the course. Azure Notebooks will be presented and worked with in a hands-on lab. Python fundamentals will be presented as well, along with a lab. Real-world Artificial Intelligence (AI) examples will be presented.
## [Week 2](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%202/README.ipynb)
Firsthand cognitive examples will be presented. The course project begins: Students will brainstorm their project concept and choose an idea. They will group into teams to choose specific ideas to develop. Requirements, applications, and technical considerations will be discussed. Live Python code example of spell check cognitive service will be presented.
## [Week 3](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%203/README.ipynb)
Teacher demonstrates Cognitive API code examples to prepare students for Computer Vision and Text Analytics Labs. Students will implement their cognitive concept in the course project and form conclusions.
## [Week 4](https://spring2019tealsproject-sguthals.notebooks.azure.com/j/notebooks/Week%204/README.ipynb)
Students will present their Project Conclusions. They will discuss considerations with data and apply these to their project. Additional cognitive services may be experimented with.
To get started, clone this Azure Notebook:
<img width="1346" alt="Screen Shot 2019-04-24 at 5 33 31 AM" src="https://user-images.githubusercontent.com/1314285/56659754-c6ca2a00-6652-11e9-9a23-377baf2adb33.png">
| 0.776284 | 0.950686 |
# PBH neutrino fluxes
Plot the neutrino flux for different PBH masses and abundances
```
from Source.fluxes import *
# Routine to plot fluxes for several masses and relevant backgrounds
def plot_fluxes(Mpbhs, fpbh, is_DM, mass_spec=0, sig=0):
if is_DM:
sufix1 = "DM"
else:
sufix1 = "evaporated"
# Sufix for outputs depending on the mass function
sufx = sufix(mass_spec, sig)
# Create legend
customlegend = []
#------------
# PBH fluxes
#------------
# Compute fluxes for an array of PBH masses (comment if already computed)
compute_flux(Mpbhs, is_DM, mass_spec, sig)
# Plot PBH fluxes
for mm, Mpbh in enumerate(Mpbhs):
fileflux = "fluxes/{:.1e}/flux_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux = np.loadtxt(fileflux, unpack=True)
plt.loglog( E_nu, fpbh*flux, color = cols[mm], linestyle="-" )
# Plot separately galactic and extragalactic flux for PBHs as DM
if is_DM:
fileflux_galac = "fluxes/{:.1e}/flux_galac_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux_galac = np.loadtxt(fileflux_galac, unpack=True)
plt.loglog( E_nu, fpbh*flux_galac, linestyle=":", color = cols[mm])
fileflux_extragalac = "fluxes/{:.1e}/flux_extragalac_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux_exga = np.loadtxt(fileflux_extragalac, unpack=True)
plt.loglog( E_nu, fpbh*flux_exga, linestyle="--", color = cols[mm])
customlegend.append( Line2D([0], [0], color=cols[mm], lw=4, label = r"$M_{\rm PBH}=$"+scinot(Mpbh)+" g"))
# Specify in legend galactic or extragalactic if PBHs are DM
if is_DM:
customlegend.append( Line2D([0], [0], color="black", linestyle=":", label="Galactic"))
customlegend.append( Line2D([0], [0], color="black", linestyle="--", label="Extragalactic"))
customlegend.append( Line2D([0], [0], color="black", linestyle="-", label="Total"))
#------------
# Background fluxes
#------------
# Load backgrounds
backfolder = "data/backfluxes/"
col_back = "grey"
Eatm, atm_nue = np.loadtxt(backfolder+"atmnue_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_nuebar = np.loadtxt(backfolder+"atmnuebar_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_numu = np.loadtxt(backfolder+"atmnumu_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_numubar = np.loadtxt(backfolder+"atmnumubar_noosc_fluka_flux.dat",unpack=True)
Eatm*=1.e3 # to MeV
atmflux = atm_nue/1.e7 # GeV^-1 m^-2 s^-1 -> MeV^-1 cm^-2 s^-1
EB8, sol_B8_1, sol_B8_2, sol_B8_3 = np.loadtxt(backfolder+"B8NeutrinoFlux.dat",unpack=True)
sol_B8 = sol_B8_1 + sol_B8_2 + sol_B8_3
Ehep, sol_hep = np.loadtxt(backfolder+"HEPNeutrinoFlux.dat",unpack=True)
EO15, sol_O15 = np.loadtxt(backfolder+"O15NeutrinoFlux.dat",unpack=True)
EN13, sol_N13 = np.loadtxt(backfolder+"N13NeutrinoFlux.dat",unpack=True)
Epp, sol_pp = np.loadtxt(backfolder+"PPNeutrinoFlux.dat",unpack=True)
# Interpolate backgrounds
atmint = interp1d(Eatm, atmflux, fill_value=0., bounds_error=False)
B8int = interp1d(EB8, sol_B8, fill_value=0., bounds_error=False)
hepint = interp1d(Ehep, sol_hep, fill_value=0., bounds_error=False)
O15int = interp1d(EO15, sol_O15, fill_value=0., bounds_error=False)
N13int = interp1d(EN13, sol_N13, fill_value=0., bounds_error=False)
ppint = interp1d(Epp, sol_pp, fill_value=0., bounds_error=False)
Ebacks = np.logspace(np.log10(Epp[0]), np.log10(Eatm[-1]), 500)
# Sum backgrounds and correct normalization, see table III of 1812.05550 or Table 2 of 1208.5723
backmax = atmint(Ebacks) + B8int(Ebacks)*4.59e6 + hepint(Ebacks)*8.31e3 + O15int(Ebacks)*1.56e8 + N13int(Ebacks)*2.17e8 + ppint(Ebacks)*6.03e10
# Plot backgrounds
plt.fill_between( Ebacks, np.zeros_like(Ebacks), backmax, color = col_back, alpha=0.3)
# Label backgrounds
plt.text(120., 7.e-3, "Atm.")
plt.text(20., 2.e1, r"hep")
#plt.text(6., 5.e6, r"$^8$B")
#plt.text(1.7, 1.e7, r"$^{15}$O")
customlegend.append( Line2D([0], [0], color=col_back, lw=6, linestyle="-", alpha=0.3, label="Backgrounds"))
# Final plot settings
plt.xlim(1., 2.e2)
plt.ylim( 1.e-5, 1.e4 )
plt.yticks(np.logspace(-5,4,num=10))
plt.tick_params(axis='both', which='both', top=True, right=True, direction="in")
plt.grid(which="both",linestyle=":",linewidth=1)
plt.legend(handles=customlegend)#, loc="lower left")
plt.xlabel(r'$E_\nu{\rm \,\, [MeV]}$')
if is_DM:
plt.title(r"$f_{\rm PBH}=$"+scinot(fpbh))
else:
plt.title(r"$\beta'=$"+scinot(fpbh))
plt.ylabel(r'$d\Phi/dE_\nu \,\, [{\rm MeV}^{-1}{\rm s}^{-1}{\rm cm}^{-2}]$')
plt.savefig("figures/fluxes_"+sufix1+".png", bbox_inches='tight', dpi=300)
plt.show()
plt.gcf().clear()
```
## Compute neutrino fluxes from PBHs as Dark Matter
```
Mpbhs = [1e15, 2e15, 4e15]
fpbh = 1.e-3
plot_fluxes(Mpbhs, fpbh, is_DM = 1)
```
## Compute neutrino fluxes from evaporated PBHs
```
Mpbhs = [1.e12, 1.e13, 1.e14]
fpbh = 1.e-18
plot_fluxes(Mpbhs, fpbh, is_DM = 0)
```
|
github_jupyter
|
from Source.fluxes import *
# Routine to plot fluxes for several masses and relevant backgrounds
def plot_fluxes(Mpbhs, fpbh, is_DM, mass_spec=0, sig=0):
if is_DM:
sufix1 = "DM"
else:
sufix1 = "evaporated"
# Sufix for outputs depending on the mass function
sufx = sufix(mass_spec, sig)
# Create legend
customlegend = []
#------------
# PBH fluxes
#------------
# Compute fluxes for an array of PBH masses (comment if already computed)
compute_flux(Mpbhs, is_DM, mass_spec, sig)
# Plot PBH fluxes
for mm, Mpbh in enumerate(Mpbhs):
fileflux = "fluxes/{:.1e}/flux_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux = np.loadtxt(fileflux, unpack=True)
plt.loglog( E_nu, fpbh*flux, color = cols[mm], linestyle="-" )
# Plot separately galactic and extragalactic flux for PBHs as DM
if is_DM:
fileflux_galac = "fluxes/{:.1e}/flux_galac_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux_galac = np.loadtxt(fileflux_galac, unpack=True)
plt.loglog( E_nu, fpbh*flux_galac, linestyle=":", color = cols[mm])
fileflux_extragalac = "fluxes/{:.1e}/flux_extragalac_isDM_{}".format(Mpbh, is_DM)+sufx
E_nu, flux_exga = np.loadtxt(fileflux_extragalac, unpack=True)
plt.loglog( E_nu, fpbh*flux_exga, linestyle="--", color = cols[mm])
customlegend.append( Line2D([0], [0], color=cols[mm], lw=4, label = r"$M_{\rm PBH}=$"+scinot(Mpbh)+" g"))
# Specify in legend galactic or extragalactic if PBHs are DM
if is_DM:
customlegend.append( Line2D([0], [0], color="black", linestyle=":", label="Galactic"))
customlegend.append( Line2D([0], [0], color="black", linestyle="--", label="Extragalactic"))
customlegend.append( Line2D([0], [0], color="black", linestyle="-", label="Total"))
#------------
# Background fluxes
#------------
# Load backgrounds
backfolder = "data/backfluxes/"
col_back = "grey"
Eatm, atm_nue = np.loadtxt(backfolder+"atmnue_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_nuebar = np.loadtxt(backfolder+"atmnuebar_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_numu = np.loadtxt(backfolder+"atmnumu_noosc_fluka_flux.dat",unpack=True)
#Eatm, atm_numubar = np.loadtxt(backfolder+"atmnumubar_noosc_fluka_flux.dat",unpack=True)
Eatm*=1.e3 # to MeV
atmflux = atm_nue/1.e7 # GeV^-1 m^-2 s^-1 -> MeV^-1 cm^-2 s^-1
EB8, sol_B8_1, sol_B8_2, sol_B8_3 = np.loadtxt(backfolder+"B8NeutrinoFlux.dat",unpack=True)
sol_B8 = sol_B8_1 + sol_B8_2 + sol_B8_3
Ehep, sol_hep = np.loadtxt(backfolder+"HEPNeutrinoFlux.dat",unpack=True)
EO15, sol_O15 = np.loadtxt(backfolder+"O15NeutrinoFlux.dat",unpack=True)
EN13, sol_N13 = np.loadtxt(backfolder+"N13NeutrinoFlux.dat",unpack=True)
Epp, sol_pp = np.loadtxt(backfolder+"PPNeutrinoFlux.dat",unpack=True)
# Interpolate backgrounds
atmint = interp1d(Eatm, atmflux, fill_value=0., bounds_error=False)
B8int = interp1d(EB8, sol_B8, fill_value=0., bounds_error=False)
hepint = interp1d(Ehep, sol_hep, fill_value=0., bounds_error=False)
O15int = interp1d(EO15, sol_O15, fill_value=0., bounds_error=False)
N13int = interp1d(EN13, sol_N13, fill_value=0., bounds_error=False)
ppint = interp1d(Epp, sol_pp, fill_value=0., bounds_error=False)
Ebacks = np.logspace(np.log10(Epp[0]), np.log10(Eatm[-1]), 500)
# Sum backgrounds and correct normalization, see table III of 1812.05550 or Table 2 of 1208.5723
backmax = atmint(Ebacks) + B8int(Ebacks)*4.59e6 + hepint(Ebacks)*8.31e3 + O15int(Ebacks)*1.56e8 + N13int(Ebacks)*2.17e8 + ppint(Ebacks)*6.03e10
# Plot backgrounds
plt.fill_between( Ebacks, np.zeros_like(Ebacks), backmax, color = col_back, alpha=0.3)
# Label backgrounds
plt.text(120., 7.e-3, "Atm.")
plt.text(20., 2.e1, r"hep")
#plt.text(6., 5.e6, r"$^8$B")
#plt.text(1.7, 1.e7, r"$^{15}$O")
customlegend.append( Line2D([0], [0], color=col_back, lw=6, linestyle="-", alpha=0.3, label="Backgrounds"))
# Final plot settings
plt.xlim(1., 2.e2)
plt.ylim( 1.e-5, 1.e4 )
plt.yticks(np.logspace(-5,4,num=10))
plt.tick_params(axis='both', which='both', top=True, right=True, direction="in")
plt.grid(which="both",linestyle=":",linewidth=1)
plt.legend(handles=customlegend)#, loc="lower left")
plt.xlabel(r'$E_\nu{\rm \,\, [MeV]}$')
if is_DM:
plt.title(r"$f_{\rm PBH}=$"+scinot(fpbh))
else:
plt.title(r"$\beta'=$"+scinot(fpbh))
plt.ylabel(r'$d\Phi/dE_\nu \,\, [{\rm MeV}^{-1}{\rm s}^{-1}{\rm cm}^{-2}]$')
plt.savefig("figures/fluxes_"+sufix1+".png", bbox_inches='tight', dpi=300)
plt.show()
plt.gcf().clear()
Mpbhs = [1e15, 2e15, 4e15]
fpbh = 1.e-3
plot_fluxes(Mpbhs, fpbh, is_DM = 1)
Mpbhs = [1.e12, 1.e13, 1.e14]
fpbh = 1.e-18
plot_fluxes(Mpbhs, fpbh, is_DM = 0)
| 0.829457 | 0.824356 |
**Importing Libraries**
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
```
**Getting the data**
```
data=pd.read_csv("../input/Salary.csv")
x=data['YearsExperience']
y=data['Salary']
plt.scatter(x,y)
plt.show()
```
**Coding Gradient Descent**
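For reference, here is the math the functions below implement, in standard univariate linear-regression notation (the symbol names are mine; `gradient[0]` corresponds to $\theta_0$ and `gradient[1]` to $\theta_1$):

$$h_\theta(x) = \theta_0 + \theta_1 x$$

$$J(\theta_0, \theta_1) = \frac{1}{2m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)^2$$

$$\theta_0 \leftarrow \theta_0 - \frac{\alpha}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right), \qquad \theta_1 \leftarrow \theta_1 - \frac{\alpha}{m}\sum_{i=1}^{m}\left(h_\theta(x^{(i)}) - y^{(i)}\right)x^{(i)}$$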
```
def hypothesis(x,gradient):
return gradient[0]+x*gradient[1]
def cost(x,y,gradient):
    # mean squared error with the conventional 1/(2m) scaling
    cost=0
    m=x.shape[0]
    for i in range(m):
        cost=cost+(hypothesis(x[i],gradient)-y[i])**2
    cost=cost/(2*m)
    return cost
def gradient_descent(x,y,gradient,alpha):
m=x.shape[0]
q0=0
q1=0
for i in range(m):
q0=q0+(hypothesis(x[i],gradient)-y[i])
q1=q1+(hypothesis(x[i],gradient)-y[i])*x[i]
q0=q0/m
q1=q1/m
gradient[0]=gradient[0]-alpha*q0
gradient[1]=gradient[1]-alpha*q1
return cost(x,y,gradient)
def run():
alpha=0.035
gradient=[0,0]
error=[]
for i in range(500):
error.append(gradient_descent(x,y,gradient,alpha))
plt.plot(error)
plt.xlabel('No of Iterations',fontsize=13)
plt.ylabel('Error',fontsize=13)
plt.title('Gradient Descent Performance',fontsize=17)
plt.show()
y_vals=x*gradient[1]+gradient[0]
fig=plt.figure(figsize=(20,7))
ax1=fig.add_subplot(1,2,1)
ax1.plot(x,y_vals)
ax1.scatter(x,y,color='red')
ax1.set_xlabel('Experience',fontsize=15)
ax1.set_ylabel('Salary',fontsize=15)
ax1.set_title('Gradient Descent',fontsize=20)
ax2=fig.add_subplot(1,2,2)
reg=LinearRegression().fit(x.values.reshape((-1,1)),y.values.reshape(-1,1))
y_pred=reg.predict(x.values[:,np.newaxis])
ax2.scatter(x,y,color='red')
ax2.plot(x,np.reshape(y_pred,(-1,1)),color='black')
ax2.set_xlabel('Experience',fontsize=15)
ax2.set_ylabel('Salary',fontsize=15)
ax2.set_title('Sklearn',fontsize=20)
plt.show()
run()
```
```
# Dependencies
import numpy as np
import pandas as pd
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import matplotlib.pyplot as plt
from matplotlib import style
style.use('seaborn')
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
## D1: Determine the Summary Statistics for June
```
# 1. Import the sqlalchemy extract function.
from sqlalchemy import extract
# 2. Write a query that filters the Measurement table to retrieve the temperatures for the month of June.
jtemp = session.query(Measurement).filter(extract('month', Measurement.date) == 6)
# 3. Convert the June temperatures to a list.
jtemp_list = []
for temp in jtemp:
jtemp_list.append(temp.tobs)
print(jtemp_list)
# 4. Create a DataFrame from the list of temperatures for the month of June.
df_jtemp = pd.DataFrame(jtemp_list, columns=["June Temps"])
df_jtemp
# 5. Calculate and print out the summary statistics for the June temperature DataFrame.
np.round(df_jtemp.describe(),2)
```
## D2: Determine the Summary Statistics for December
```
# 6. Write a query that filters the Measurement table to retrieve the temperatures for the month of December.
dtemp = session.query(Measurement).filter(extract('month', Measurement.date) == 12)
# 7. Convert the December temperatures to a list.
dtemp_list=[]
for temp in dtemp:
dtemp_list.append(temp.tobs)
print(dtemp_list)
# 8. Create a DataFrame from the list of temperatures for the month of December.
df_dtemp = pd.DataFrame(dtemp_list, columns=["December Temps"])
# 9. Calculate and print out the summary statistics for the December temperature DataFrame.
np.round(df_dtemp.describe(),2)
```
## Additional Queries
```
# Check column names and values of measurement table
# engine.execute("select * from measurement").keys()
# >>> RMKeyView(['id', 'station', 'date', 'prcp', 'tobs'])
# engine.execute("select * from measurement").fetchone()
# >>> (1, 'USC00519397', '2010-01-01', 0.08, 65.0)
# Check column names and values of station table
# engine.execute("select * from station").keys()
# >>> RMKeyView(['id', 'station', 'name', 'latitude', 'longitude', 'elevation'])
# engine.execute("select * from station").fetchone()
# >>> (1, 'USC00519397', 'WAIKIKI 717.2, HI US', 21.2716, -157.8168, 3.0)
# list of months for query
mos = [6, 12]
# query count of observations for June and December grouped by year
result = session.query(extract('year',Measurement.date), extract('month',Measurement.date),
func.count(Measurement.tobs)).\
filter(extract('month',Measurement.date).in_(mos)).\
group_by(extract('year',Measurement.date),\
extract('month',Measurement.date)).all()
for row in result:
print(row)
# result shows no observation for December 2017
# redo temp analysis - June
jtemp2 = session.query(Measurement).filter(Measurement.date < '2017-01-01',
extract('month', Measurement.date) == 6)
# convert to list
jtemp_list2 = [temp.tobs for temp in jtemp2]
# convert to DataFrame
df_jtemp2 = pd.DataFrame(jtemp_list2, columns=["June Temps"])
# get summary statistics
np.round(df_jtemp2.describe(),2)
# redo temp analysis - December
dtemp2 = session.query(Measurement).filter(Measurement.date < '2017-01-01',
extract('month', Measurement.date) == 12)
# convert to list
dtemp_list2 = [temp.tobs for temp in dtemp2]
# convert to DataFrame
df_dtemp2 = pd.DataFrame(dtemp_list2, columns=["December Temps"])
# get summary statistics
np.round(df_dtemp2.describe(),2)
# Precipitation Data
# check number of observations per station
session.query(Measurement.station, func.min(Measurement.date), func.max(Measurement.date), func.count(Measurement.station)).\
filter(Measurement.date >= '2014-01-01', Measurement.date < '2017-01-01').\
group_by(Measurement.station).all()
# USC00511918, USC00517948, and USC00518838 will not be included for the precipitation analysis due to inactivity
# list of active stations
active_stations = ['USC00513117','USC00514830','USC00516128','USC00519281','USC00519397','USC00519523']
# Query 3 years of precipitation data
twenty_fourteen = session.query(extract('month',Measurement.date), extract('day',Measurement.date), func.avg(Measurement.prcp)).\
filter((Measurement.station).in_(active_stations),
Measurement.date >= '2014-01-01', Measurement.date < '2015-01-01').\
group_by(Measurement.date).all()
twenty_fifteen = session.query(extract('month',Measurement.date), extract('day',Measurement.date), func.avg(Measurement.prcp)).\
filter((Measurement.station).in_(active_stations),
Measurement.date >= '2015-01-01', Measurement.date < '2016-01-01').\
group_by(Measurement.date).all()
twenty_sixteen = session.query(extract('month',Measurement.date), extract('day',Measurement.date), func.avg(Measurement.prcp)).\
filter((Measurement.station).in_(active_stations),
Measurement.date >= '2016-01-01', Measurement.date < '2017-01-01').\
group_by(Measurement.date).all()
# Transform to DataFrame
twenty_fourteen_df = pd.DataFrame(twenty_fourteen, columns=['month','day','precipitation'])
twenty_fourteen_df.set_index((['month','day']), inplace=True)
twenty_fifteen_df = pd.DataFrame(twenty_fifteen, columns=['month','day','precipitation'])
twenty_fifteen_df.set_index((['month','day']), inplace=True)
twenty_sixteen_df = pd.DataFrame(twenty_sixteen, columns=['month','day','precipitation'])
twenty_sixteen_df.set_index((['month','day']), inplace=True)
# Create plot
ax = twenty_fourteen_df.plot(figsize=(12,6))
twenty_fifteen_df.plot(ax=ax)
twenty_sixteen_df.plot(ax=ax)
labels=[2014, 2015, 2016]
plt.legend(labels=labels, fontsize=12, title='Year')
plt.ylim(0,6.5)
plt.ylabel('Avg Precipitation')
plt.xlabel('Month, Day')
plt.title('3 Year Precipitation Data')
```
```
#Import Dependencies
%matplotlib inline
import pandas as pd
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
import dataframe_image as dfi
from scipy import stats
import seaborn as sns
# Load dataset
df = pd.read_csv('timeseries_testv4alt.csv')
df.head()
df_output = df.tail(1)
lastyear_df = df_output.drop(columns=['Year', 'Waitlist','Diabetes','Obesity','Overdose','Pct_O'])
lastyear_df = lastyear_df.reset_index(drop=True)
lastyear_df
df.plot(kind='box', subplots=True, layout=(6,6), sharex=False, sharey=False)
plt.show()
# scatter plot matrix
pd.plotting.scatter_matrix(df)
plt.show()
#Describe data
df.describe()
#Correlation Matrix
corr_matrix = df[["Diabetes", "Obesity", "Overdose", "Pct_O"]].corr()
print(corr_matrix)
#Assign variables for regression
X = df[['Diabetes','Obesity','Overdose','Pct_O']]
y = df['Total_Adj'].values.reshape(-1,1)
print(X.shape, y.shape)
#Use train_test_split to create training and testing data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
#Create model
model = LinearRegression()
#fit the model to the training data and calculate the scores for the training and testing data
#Begin
model.fit(X_train, y_train)
training_score = model.score(X_train, y_train)
testing_score = model.score(X_test, y_test)
#End
print(f"Training Score: {training_score}")
print(f"Testing Score: {testing_score}")
#Plot Residuals for Training and Testing Data
#Begin
plt.scatter(model.predict(X_train), model.predict(X_train) - y_train, c="green", label="Training Data")
plt.scatter(model.predict(X_test), model.predict(X_test) - y_test, c="red", label="Testing Data")
plt.legend()
plt.hlines(y=0, xmin=y.min(), xmax=y.max())
plt.title("Residual Plot")
#End
#R2 of original model
r_sq = model.score(X, y)
print('coefficient of determination', r_sq)
print(model.intercept_)
print(model.coef_)
#Make a Prediction using model
predictedTotal_Adj = model.predict([[10.77,32,19.9,49]])
print(predictedTotal_Adj)
print(X_train.shape)
print(y_train.shape)
print(y_test.shape)
print(X_test.shape)
modeldetail = sm.OLS(y, X)
resultstest = modeldetail.fit()
print(X.shape)
print(y.shape)
plt.scatter(X[('Diabetes')], y, label='Diabetes')
plt.scatter(X[('Obesity')], y, label='Obesity')
plt.scatter(X[('Overdose')], y, label='Overdose')
plt.scatter(X[('Pct_O')], y, label='% O-type')
plt.title('Full Model')
plt.xlabel('Variables through Time (in %, except Overdose in per 100,000)')
plt.ylabel('Kidney Waitlist through Time')
plt.legend(title='variables',loc='lower center')
plt.savefig('images/US_full.png', dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches=None, pad_inches=0.2, metadata=None)
plt.show()
fig = plt.figure()
plt.scatter(X_train[('Diabetes')], y_train)
plt.scatter(X_train[('Obesity')], y_train)
plt.scatter(X_train[('Overdose')], y_train)
plt.scatter(X_train[('Pct_O')], y_train)
plt.title('Training Model')
fig.set_size_inches(7,5)
plt.show()
plt.scatter(X_test[('Diabetes')], y_test)
plt.scatter(X_test[('Obesity')], y_test)
plt.scatter(X_test[('Overdose')], y_test)
plt.scatter(X_test[('Pct_O')], y_test)
plt.title('Test Model')
plt.show()
plt.scatter(X_train[('Diabetes')], y_train)
plt.scatter(X_train[('Obesity')], y_train)
plt.scatter(X_train[('Overdose')], y_train)
plt.scatter(X_train[('Pct_O')], y_train)
plt.scatter(X_test[('Diabetes')], y_test)
plt.scatter(X_test[('Obesity')], y_test)
plt.scatter(X_test[('Overdose')], y_test)
plt.scatter(X_test[('Pct_O')], y_test)
plt.title('Training and Test Data')
plt.show()
model_lin = sm.OLS.from_formula("Total_Adj ~ Diabetes + Obesity + Overdose + Pct_O", data=df)
result_lin = model_lin.fit()
result_lin.summary()
df
sns.regplot(X[('Diabetes')],y)
plt.title('Diabetes % vs. Waitlist')
plt.xlabel('Diabetes %')
plt.ylabel('Kidney Waitlist through Time')
plt.savefig('images/US_diabetes.png', dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches=None, pad_inches=0.2, metadata=None)
sns.regplot(X[('Obesity')],y)
plt.title('Obesity % vs. Waitlist')
plt.xlabel('Obesity %')
plt.ylabel('Kidney Waitlist through Time')
plt.savefig('images/US_obesity.png', dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches=None, pad_inches=0.2, metadata=None)
sns.regplot(X[('Overdose')],y)
plt.xlabel('Overdose rate per 100k')
plt.title('Overdoses rate per 100k vs. Waitlist')
plt.ylabel('Kidney Waitlist through Time')
plt.savefig('images/US_overdose.png', dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches=None, pad_inches=0.2, metadata=None)
sns.regplot(X[('Pct_O')],y)
plt.xlabel('Percent O Blood-Type')
plt.title('Percent O Blood-Type vs. Waitlist')
plt.ylabel('Kidney Waitlist through Time')
plt.savefig('images/US_pct_O.png', dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', format=None,
transparent=False, bbox_inches=None, pad_inches=0.2, metadata=None)
sns.regplot(X[('Diabetes')],y)
sns.regplot(X[('Obesity')],y)
sns.regplot(X[('Overdose')],y)
sns.regplot(X[('Pct_O')],y)
coefficients = pd.concat([pd.DataFrame(X.columns),pd.DataFrame(np.transpose(model.coef_))], axis = 1)
coefficients_transposed = coefficients.T
coefficients_transposed
coefficients_final = coefficients_transposed[1:]
coefficients_final
test_df = pd.DataFrame([[r_sq,training_score,testing_score,model.intercept_ ,predictedTotal_Adj]])
test_df
#dfi.export(test_df, 'test_df.png')
summary_df = pd.concat([test_df, lastyear_df, coefficients_final], axis=1)
summary_df.columns = ['R2','Training Score', 'Test Score', 'Intercept','Forecast based on Natl Avg','Last Waitlist','Coef_Diabetes','Coef_Obesity','Coef_Overdose','Coef_Pct_O']
summary_df
summary_df.dtypes
summary_df
summary_df['Intercept'] = summary_df['Intercept'].astype(int)
summary_df['Forecast based on Natl Avg'] = summary_df['Forecast based on Natl Avg'].astype(int)
summary_df['R2'] = summary_df['R2'].round(decimals=4)
summary_df['Training Score'] = summary_df['Training Score'].round(decimals=4)
summary_df['Test Score'] = summary_df['Test Score'].round(decimals=4)
summary_df = summary_df[['R2','Training Score','Test Score','Intercept','Coef_Diabetes','Coef_Obesity','Coef_Overdose','Coef_Pct_O','Last Waitlist','Forecast based on Natl Avg']]
summary_df
summaryalt_df = summary_df.style.set_properties(**{'background-color': 'black','color': 'lawngreen','border-color': 'white'})
summaryalt_df
```
# Basic finmag tutorial
## Setting up a basic simulation
Depending on the functionalities required, different finmag modules should be imported. For the beginning, we import the following:
```
import dolfin as df # enables us to create basic meshes
from finmag import Simulation as Sim
from finmag.energies import Exchange, DMI, Zeeman, Demag, UniaxialAnisotropy
```
Firstly, we create a three-dimensional mesh with dimensions $l_{x} = 50$, $l_{y} = 50$, and $l_{z} = 10$. So, we need to provide two points as well as the mesh discretisation in all three directions ($n_{x}$, $n_{y}$, $n_{z}$).
```
lx = 50 # (m)
ly = 50 # (m)
lz = 10 # (m)
nx = 25 # number of vertices in x-direction
ny = 25
nz = 5
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(lx, ly, lz), nx, ny, nz)
```
In the next step, we need to define some basic material parameters:
```
Ms = 1e6 # magnetisation saturation (A/m)
A = 1e-12 # exchange energy constant (J/m)
D = 1e-3 # DMI constant (J/m**2)
K = 1e5 # uniaxial anisotropy constant (J/m**3)
Kaxis = (0, 0, 1) # uniaxial anisotropy axis
H = (0, 0, 1e7) # external magnetic field (A/m)
```
Now, we can create a simulation object. The first argument is the mesh, the second one is the magnetisation saturation, and the third one is the unit length. The unit length should be specified because the mesh is initially created in metres.
```
sim = Sim(mesh, Ms, unit_length=1e-9)
```
Once the simulation object is created, different interactions can be added to it:
```
sim.add(Exchange(A)) # add exchange interaction
sim.add(DMI(D)) # add DMI interaction
# sim.add(DMI(D, dmi_type='interfacial')) # interfacial DMI interaction
sim.add(Zeeman(H)) # add Zeeman energy
sim.add(UniaxialAnisotropy(K, Kaxis)) # add uniaxial anisotropy
sim.add(Demag())  # add demagnetisation (magnetostatic) energy
```
So, at this point the Hamiltonian is created. Now, we can set parameters in the LLG equation. Precession and damping terms are enabled by default and other terms can be added separately.
```
sim.alpha = 0.5 # set the Gilbert damping
```
When both the Hamiltonian and the LLG equation are set, we need to set the initial magnetisation before we relax the system:
```
sim.set_m((1, 0, 0))
```
The system is relaxed:
```
sim.relax()
```
The magnetisation data can be either:
- shown as the numpy array
- saved to the h5 file
- saved as a pvd file so that it can be visualised in paraview or mayavi
```
print sim.llg.m_field.vector().array() # show as a numpy array
# Save the magnetisation in the HDF5 file
h5_filename = 'example1'
sim.llg.m_field.save_hdf5(h5_filename, t=0)
```
A more detailed tutorial on saving and reading HDF5 files is provided separately.
```
# Save the VTK file for visualisation using Paraview or Mayavi
pvd_filename = 'relaxed_state.pvd'
sim.save_vtk(pvd_filename)
```
## Multiple materials (spatially varying parameters)
If multiple materials are present in the system, spatially varying parameters should be provided. The simplest way of doing that is to define a Python function for these parameters. For instance, in the case of DMI:
```
def D_fun(pos):
x, y, z = pos[0], pos[1], pos[2]
if x < lx/2.:
return -D
else:
return D
```
Now, this function can be added to the simulation instead of the constant `D`, for instance `sim.add(DMI(D_fun))` (see the sketch below). The same procedure applies for all parameters introduced so far.
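A minimal sketch (note that `A_fun` is a hypothetical spatially varying exchange constant, added here purely for illustration):

```
# In a fresh simulation, add the spatially varying DMI constant
# defined above instead of the constant D:
sim.add(DMI(D_fun))

# The same pattern applies to the other parameters introduced so far,
# e.g. a (hypothetical) spatially varying exchange constant:
def A_fun(pos):
    x, y, z = pos[0], pos[1], pos[2]
    if x < lx/2.:
        return 0.5e-12  # exchange constant in region 1 (J/m)
    else:
        return A        # exchange constant in region 2 (J/m)

# sim.add(Exchange(A_fun))
```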
## Extracting magnetisation data
The magnetisation at a single point can be sampled as:
```
sampling_point = (20, 15, 5) # coordinates of a sampling point
sampled_magnetisation = sim.llg.m_field.probe(sampling_point)
print sampled_magnetisation
```
### Figure generation notebook for SI Fig 1
```
import os
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
from tqdm import tqdm  # progress bar used in the data-loading loop below
mpl.rcParams.update(mpl.rcParamsDefault) #Reset rcParams to default
# Plotting style function (run this before plotting the final figure)
def set_plotting_style():
plt.style.use('seaborn-paper')
plt.rc('axes', labelsize=12)
plt.rc('axes', titlesize=12)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
plt.rc('legend', fontsize=10)
plt.rc('text.latex', preamble=r'\usepackage{sfmath}')
plt.rc('xtick.major', pad=2)
plt.rc('ytick.major', pad=2)
plt.rc('mathtext', fontset='stixsans', sf='sansserif')
plt.rc('figure', figsize=[10,9])
plt.rc('svg', fonttype='none')
# To do:
# Load data
dataStatusFile = '../../../data/RIN_scores.csv'
dataPath = '/../.'
# Load DataStatus spreadsheet
dataStatus = pd.read_csv(dataStatusFile, index_col=0)
# Subset by the data that have RIN score
dataStatus = dataStatus[dataStatus['RIN score'] > 0]
# Get the experiment names
experimentName = dataStatus['Name']
# Re-index DataStatus by name
dataStatus = dataStatus.set_index('Name')
# Load the experiment files
detectedTranscripts = []
for i in tqdm(range(len(experimentName))):
detectedTranscripts.append(pd.read_csv(os.path.join(dataPath, experimentName[i], 'ExportBarcodes',
'region_0','barcodes.csv')))
print('Data loaded.')
# Calculate transcript density
dataStatus['transcript density'] = np.nan
for i, name in enumerate(experimentName):
dataStatus.loc[name, 'transcript density'] = detectedTranscripts[i].shape[0] / detectedTranscripts[i]['fov'].max()
print('Transcript densities calculated.')
# Rename tissues
dataStatus = dataStatus.replace({'mouse liver':'liver', 'mouse pancreas':'pancreas',
'mouse kidney':'kidney'})
print('Tissues renamed.')
dataStatus.head()
set_plotting_style()
# Plot the whole figure
# Get the reduced dataframe to plot
df_plot = dataStatus[['RIN score','transcript density','Tissue']].copy()
# Define tissue colors
df_plot['tissue_color'] = df_plot['Tissue'].map({'liver':'tab:blue',
'kidney':'tab:orange',
'pancreas':'tab:green'})
# Plot the figure
fig, axes = plt.subplots(figsize=(4,4))
ax = axes
df_plot.plot.scatter(x='RIN score', y='transcript density', c='tissue_color',ax=ax)
ax.set_yscale('log')
# Add color legend
from matplotlib.lines import Line2D
legend_elements = [Line2D([0], [0], marker='o', color='w', label='liver',
markerfacecolor='tab:blue', markersize=5),
Line2D([0], [0], marker='o', color='w', label='kidney',
markerfacecolor='tab:orange', markersize=5),
Line2D([0], [0], marker='o', color='w', label='pancreas',
markerfacecolor='tab:green', markersize=5)]
ax.legend(handles=legend_elements)
fig.tight_layout()
plt.show()
# Export figures
fig.savefig('../../figures/SIFig_RINAnalysis.pdf')
fig.savefig('../../figures/SIFig_RINAnalysis.png')
```
# Support Vector Machines on Iris flower data set
The Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by Sir Ronald Fisher in 1936 as an example of discriminant analysis.
The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor), so 150 total samples. Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.
```
# The Iris Setosa
from IPython.display import Image
url = 'http://upload.wikimedia.org/wikipedia/commons/5/56/Kosaciec_szczecinkowaty_Iris_setosa.jpg'
Image(url, width=300, height=300)
```
The iris dataset contains measurements for 150 iris flowers from three different species.

The three classes in the Iris dataset:

- Iris-setosa (n=50)
- Iris-versicolor (n=50)
- Iris-virginica (n=50)

The four features of the Iris dataset:

- sepal length in cm
- sepal width in cm
- petal length in cm
- petal width in cm
## Loading the data
```
# load libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# load data
iris = sns.load_dataset('iris')
```
## Exploratory Data Analysis
```
iris.sample(7)
# sns pairplot of the data set
sns.pairplot(iris, hue='species')
```
**Creating a KDE plot of sepal_length versus sepal_width for the setosa species.**
```
setosa = iris[iris['species']=='setosa']
sns.jointplot(setosa['sepal_width'], setosa['sepal_length'],
kind='kde', cmap="plasma", shade_lowest=False)
```
### Train Test Split
**Splitting data into a training set and a testing set.**
```
from sklearn.model_selection import train_test_split
X = iris.drop('species', axis=1)
y = iris['species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.28)
```
### Train Support Vector Machine Classifier
```
from sklearn.svm import SVC
svc = SVC()
svc
svc.fit(X_train,y_train)
```
### Model Evaluation
```
predictions = svc.predict(X_test)
len(predictions)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
```
### Using Gridsearch to tune parameters
```
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.1, 1, 10, 100],
'gamma': [1, 0.1, 0.01, 0.001],
'kernel': ['rbf', 'linear', 'poly']}
grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2)
grid.fit(X_train, y_train)
grid.best_estimator_
grid.best_params_
grid.best_score_
grid_predictions = grid.predict(X_test)
print(confusion_matrix(y_test, grid_predictions))
print(classification_report(y_test,grid_predictions))
```
```
from keras.models import Sequential
import numpy as np
import tensorflow as tf
from data_utils import load_CIFAR10
import keras
from sklearn import preprocessing
from keras import regularizers
trainSet_size = 5000
crossval_size = 2000 # Last XXX
train_size = trainSet_size - crossval_size
num_classes = 10
#Load data
cifar10_dir = 'Dataset/cifar-10-batches-py'
x_train, y_train, x_test, y_test = load_CIFAR10(cifar10_dir)
#Pick Cross-Validation Data
x_train = x_train[0: trainSet_size]
x_crossval = x_train[train_size: len(x_train)]
x_train = x_train[0: train_size]
y_train = y_train[0: trainSet_size]
y_crossval = y_train[train_size: len(y_train)]
y_train = y_train[0: train_size]
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
y_crossval = keras.utils.to_categorical(y_crossval, num_classes)
# Reshape the image data into rows
x_train = np.reshape(x_train, (x_train.shape[0], -1))
x_test = np.reshape(x_test, (x_test.shape[0], -1))
x_crossval = np.reshape(x_crossval, (x_crossval.shape[0], -1))  # cross-validation set needs the same flattening
print(x_train.shape, x_test.shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_crossval = x_crossval.astype('float32')
# Preprocessing: standardise each sample (zero mean, unit variance per image)
x_train = preprocessing.scale(x_train.transpose()).transpose()
x_test = preprocessing.scale(x_test.transpose()).transpose()
x_crossval = preprocessing.scale(x_crossval.transpose()).transpose()
model = Sequential()
from keras.layers import Dense, Activation
model.add(Dense(units=35, input_shape=(3072,) , kernel_regularizer=regularizers.l2(0.1)))
model.add(Activation('relu'))
model.add(Dense(units=30, kernel_regularizer=regularizers.l2(0.1)))
model.add(Activation('relu'))
model.add(Dense(units=25, kernel_regularizer=regularizers.l2(0.1)))
model.add(Activation('relu'))
model.add(Dense(units=20, kernel_regularizer=regularizers.l2(0.1)))
model.add(Activation('relu'))
model.add(Dense(units=10))
model.add(Activation('softmax'))
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
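# NOTE: the second compile() call below replaces the optimizer configured
# above (RMSprop) with SGD and drops the 'accuracy' metric, so
# model.evaluate() will return only the loss.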
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True))
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(x_train[0:train_size],
y_train[0:train_size],
validation_data=(x_test, y_test),
epochs=5,
batch_size=16)
loss_and_metrics = model.evaluate(x_test, y_test, batch_size=128)
classes = model.predict(x_crossval, batch_size=128)
count = 0
for i in range(len(y_crossval)):  # iterate over the cross-validation set, not the test set
    result = np.argmax(classes[i]) == np.argmax(y_crossval[i])
    if result:
        count += 1
print(str(count) + " / " + str(len(y_crossval)))
```
```
myList=[1,2,3,4,5]
myTuple=(1,2,3,4,5)
type(myList)
type(myTuple)
myList=tuple(myList)
type(myList)
myTuple=list(myList)
type(myTuple)
10
#Control blocks
for i in range(1,20,4):
print("valueof i {} and i^2 {}" .format(i,i**2))
userInt= int(input("please enter a positive integer: "))
if (userInt>99):
print("it is 3 dig +ve int")
elif(userInt>9):
print("it is 2 dig +ve int")
elif(userInt>-1):
print("it is 1 dig +ve int")
else:
print("Fooling the system")
#writing a function
import numpy as np
def areaOfCircle(r):
return(np.pi*(r**2))
areaOfCircle(2)
#understanding sigmoid function
def sigmoid(x):
return(1/(1+np.exp(-x)))
sigmoid(10)
sigmoid(-10)
xvalues=np.linspace(-15,15,100)
xvalues
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(xvalues,sigmoid(xvalues),color='blue')
def derivative(f, x, epsilon=1e-6):  # central difference; an epsilon as small as 1e-14 loses accuracy to floating-point rounding
    return ((f(x+epsilon)-f(x-epsilon))/(2*epsilon))
def TstFunc(x):
return ((5*x)+2)
TstFunc(7)
derivative(TstFunc,int(7))
derivative(TstFunc,int(78))
arr =np.zeros(10)
arr
arr.dtype
def Xsquare(x):
return(x*x)
derivative(Xsquare,5)
```
# how to select xvalues
```
plt.plot(xvalues,sigmoid(xvalues),color='red')
plt.plot(xvalues,derivative(sigmoid,xvalues),color='blue')
#random distribution
randomD=np.random.normal(65,10,10000)
randomD
randomD.mean()
meanManually=np.sum(randomD)/len(randomD)
meanManually
randomD.std()
stdManually= np.sqrt((sum(np.square(randomD-(np.sum(randomD)/len(randomD)))))/(len(randomD)))
stdManually
import seaborn as sns
sns.distplot(randomD)
```
# Taxi fare prediction using linear model
# ClassifierByExample
```
from sklearn import datasets
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
dataset= datasets.load_iris()
X= dataset.data[:,[0,1,2,3]]
y=dataset.target
X
X.shape
y
dataset.target_names
#initialise input features
dataframe= pd.DataFrame(X)
dataframe['target']=y
dataframe.shape
dataframe.head()
corrmap=dataframe.corr()
sns.heatmap(corrmap,annot =True,cmap='coolwarm')
X=dataframe.iloc[:,[2,3]].values
X[95:105]
X
y
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,y,test_size=0.3,random_state=1,stratify=y)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
sc.fit(X_train)
X_train_std=sc.transform(X_train)
X_test_std=sc.transform(X_test)
from sklearn.linear_model import Perceptron
ppn = Perceptron(max_iter=1000,eta0=0.1,random_state=1)
ppn.fit(X_train_std, Y_train)
Y_pred=ppn.predict(X_test_std)
print("y true label are: ", Y_test)
print("Predictions are: ",Y_pred)
print("No of misclassified sample: ", np.sum(Y_test!=y_pred))
from sklearn.metrics import accuracy_score
print("accuracy_score is", accuracy_score(Y_test,y_pred))
```
# xESMF backend usage and benchmark
xESMF isn't just a wrapper of ESMPy. It only uses ESMPy to generate regridding weights, but has its own Scipy-based method for applying weights (see [more about regridding weights](./Reuse_regridder.ipynb#Why-applying-regridding-is-so-fast?)).
We switch to the Scipy method because its serial performance is much higher than that of ESMPy's own engine, and because it can also reuse weights ([issue#2](https://github.com/JiaweiZhuang/xESMF/issues/2)). ESMPy's native method is available in the backend, mainly for benchmarking Scipy results in unit tests.
Here we show how to use xESMF backend and compare the performance of two methods. Note that the backend is still pretty easy to use compared to the original ESMPy -- it just doesn't have a fancy API and cannot deal with xarray metadata.
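Conceptually, applying precomputed regridding weights amounts to a sparse matrix multiplication over the flattened horizontal grid. A minimal standalone sketch of that idea (an illustration only, not xESMF's actual internals; the toy sizes and random weights are made up):

```
import numpy as np
import scipy.sparse as sps

n_in, n_out = 4, 3                      # number of points in the input/output grids (toy sizes)
weights = sps.random(n_out, n_in, density=0.5, format='csr')  # stands in for ESMF-generated weights
data_in = np.arange(n_in, dtype=float)  # flattened input field

data_out = weights.dot(data_in)         # flattened output field on the destination grid
print(data_out.shape)                   # (3,)
```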
```
import os
import numpy as np
import xesmf as xe
# backend functions
from xesmf.backend import (esmf_grid, esmf_regrid_build,
esmf_regrid_apply, esmf_regrid_finalize)
from xesmf.smm import read_weights, apply_weights
```
## Prepare data
We use the same data as in the [reusing regridder example](./Reuse_regridder.ipynb), but convert xarray DataSet to pure numpy arrays to work with the backend.
```
ds_in = xe.util.grid_2d(-120, 120, 0.4, # longitude range and resolution
-60, 60, 0.3) # latitude range and resolution
ds_out = xe.util.grid_2d(-120, 120, 0.6,
-60, 60, 0.4)
ds_in.coords['time'] = np.arange(1, 11)
ds_in.coords['lev'] = np.arange(1, 51)
ds_in['data2D'] = xe.data.wave_smooth(ds_in['lon'], ds_in['lat'])
ds_in['data4D'] = ds_in['time'] * ds_in['lev'] * ds_in['data2D']
# backend only accepts pure numpy array
lon_in = ds_in['lon'].values
lat_in = ds_in['lat'].values
lon_out = ds_out['lon'].values
lat_out = ds_out['lat'].values
data_in = ds_in['data4D'].values
data_in.shape
```
## Make ESMF Grid objects
```
grid_in = esmf_grid(lon_in.T, lat_in.T)
grid_out = esmf_grid(lon_out.T, lat_out.T)
```
This is a native ESMPy Grid object:
```
type(grid_in)
```
We pass the transpose (`lon.T`) because ESMPy prefers Fortran-ordering to C-ordering (see this [issue](https://github.com/nawendt/esmpy-tutorial/issues/4)).
```
lon_in.flags # numpy arrays are mostly C-ordered
lon_in.T.flags # a memory view on its transpose would be Fortran-ordered
```
## Compute weights
```
filename = 'test_weights.nc' # weight filename
if os.path.exists(filename):
os.remove(filename) # ESMPy will crash if the file exists
```
Computing weights takes ~7s, as in the [reusing regridder example](./Reuse_regridder.ipynb#Build-Regridder).
```
%%time
regrid = esmf_regrid_build(grid_in, grid_out, 'bilinear',
extra_dims=[50, 10], # reversed to Fortran-ordering
filename=filename)
```
It returns a native ESMPy Regrid object:
```
type(regrid)
```
It also writes weights to disk so we can then read them back for Scipy.
```
%%bash
ncdump -h test_weights.nc
```
## Apply weights using ESMPy backend
It takes ~3s with ESMPy's native method.
```
%%time
data_out_esmpy = esmf_regrid_apply(regrid, data_in.T).T
```
The first `.T` converts C-ordering to F-ordering for ESMPy, and the second `.T` converts the result back to C-ordering. It just gets a memory view and thus incurs almost no overhead.
```
data_out_esmpy.flags
data_out_esmpy.shape # broadcasted over extra dimensions
```
## Apply weights using Scipy backend
Read weights back for Scipy. `read_weights` needs to know the shape of the sparse matrix, i.e. how many points in input and output grids.
```
weights = read_weights(filename, lon_in.size, lon_out.size)
weights
```
`apply_weights` needs to know shape of the output grid.
```
lon_out.shape
%%time
data_out_scipy = apply_weights(weights, data_in, lon_in.shape, lon_out.shape)
```
It is several times faster than ESMPy's native method. This conclusion appears to be robust across different platforms (feel free to verify it on your own), so we choose Scipy as the default backend.
A likely explanation for the performance discrepancy is that the original ESMF is optimized for large processor counts (~1000 CPUs) at the expense of serial performance (ESMF team, personal communication).
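Conceptually, applying regridding weights is just a sparse matrix product over the flattened grids, which is exactly what Scipy's CSR format handles well in serial. The cell below is only a rough sketch of that idea with small made-up sizes and random weights -- it is not the actual `xesmf.smm.apply_weights` implementation.
```
# Rough sketch (not xESMF's actual code): regridding as one sparse matrix product.
# Grid sizes and weights below are made up purely for illustration.
import numpy as np
import scipy.sparse as sps
n_in, n_out = 100 * 120, 80 * 100 # flattened input/output grid sizes
rows = np.random.randint(0, n_out, size=4 * n_out) # destination indices
cols = np.random.randint(0, n_in, size=4 * n_out) # source indices
vals = np.random.rand(4 * n_out) # interpolation weights
weights_sketch = sps.coo_matrix((vals, (rows, cols)), shape=(n_out, n_in)).tocsr()
data_in_flat = np.random.rand(20, n_in) # extra dimensions flattened into rows
data_out_flat = (weights_sketch @ data_in_flat.T).T # one CSR multiply regrids everything
data_out_flat.shape
```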
```
data_out_scipy.shape # broadcasted over extra dimensions
np.testing.assert_equal(data_out_scipy, data_out_esmpy) # exactly the same
os.remove(filename) # clean-up
```
# Artificial Intelligence Nanodegree
## Convolutional Neural Networks
---
In your upcoming project, you will download pre-computed bottleneck features. In this notebook, we'll show you how to calculate VGG-16 bottleneck features on a toy dataset. Note that unless you have a powerful GPU, computing the bottleneck features takes a significant amount of time.
### 1. Load and Preprocess Sample Images
Before supplying an image to a pre-trained network in Keras, there are some required preprocessing steps. You will learn more about this in the project; for now, we have implemented this functionality for you in the first code cell of the notebook. We have imported a very small dataset of 8 images and stored the preprocessed image input as `img_input`. Note that the dimensionality of this array is `(8, 224, 224, 3)`. In this case, each of the 8 images is a 3D tensor, with shape `(224, 224, 3)`.
```
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
import numpy as np
import glob
img_paths = glob.glob("images/*.jpg")
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
list_of_tensors = [path_to_tensor(img_path) for img_path in img_paths]
return np.vstack(list_of_tensors)
# calculate the image input. You will learn more about how this works in the project!
img_input = preprocess_input(paths_to_tensor(img_paths))
print(img_input.shape)
```
### 2. Recap How to Import VGG-16
Recall how we import the VGG-16 network (including the final classification layer) that has been pre-trained on ImageNet.

```
from keras.applications.vgg16 import VGG16
model = VGG16()
model.summary()
```
For this network, `model.predict` returns a 1000-dimensional probability vector containing the predicted probability that an image belongs to each of the 1000 ImageNet categories. The dimensionality of the output obtained from passing `img_input` through the model is `(8, 1000)`. The first value of `8` merely denotes that 8 images were passed through the network.
```
model.predict(img_input).shape
```
### 3. Import the VGG-16 Model, with the Final Fully-Connected Layers Removed
When performing transfer learning, we need to remove the final layers of the network, as they are too specific to the ImageNet database. This is accomplished in the code cell below.

```
from keras.applications.vgg16 import VGG16
model = VGG16(include_top=False)
model.summary()
```
### 4. Extract Output of Final Max Pooling Layer
Now, the network stored in `model` is a truncated version of the VGG-16 network, where the final three fully-connected layers have been removed. In this case, `model.predict` returns, for each image, a 3D array (with dimensions $7\times 7\times 512$) corresponding to the final max pooling layer of VGG-16. The dimensionality of the output obtained from passing `img_input` through the model is `(8, 7, 7, 512)`. The first value of `8` merely denotes that 8 images were passed through the network.
```
print(model.predict(img_input).shape)
```
This is exactly how we calculate the bottleneck features for your project!
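As a follow-up, these bottleneck features can be fed into a small classifier that you train yourself. The cell below is only a hedged sketch of that idea -- the `labels` array is a made-up placeholder, and in the real project you will use the provided pre-computed bottleneck features together with your own labels.
```
# Sketch only (not part of the original notebook): a tiny classifier on top of the
# bottleneck features. `labels` is a hypothetical placeholder for real labels.
import numpy as np
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Dense
bottleneck_features = model.predict(img_input) # shape (8, 7, 7, 512)
labels = np.eye(2)[np.random.randint(0, 2, size=8)] # placeholder one-hot labels
top_model = Sequential([
    GlobalAveragePooling2D(input_shape=bottleneck_features.shape[1:]),
    Dense(2, activation='softmax')
])
top_model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
top_model.fit(bottleneck_features, labels, epochs=5, batch_size=4)
```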
#### Data Fetch
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
# extracting lines for a simplified version
open('2-fft-malicious-n-0-4-m-11.txt','w').writelines([ line for line in open("2-fft-malicious-n-0-4-m-11.log") if "Enqueue" in line])
print ("done")
#extracting content from lines
csv_out = open('2-fft-malicious-n-0-4-m-11-csv.txt','w')
with open ('2-fft-malicious-n-0-4-m-11.txt', 'rt') as fft:
csv_out.write("time,router,outport,inport,packet_address,packet_type,flit_id,flit_type,vnet,vc,src_ni,src_router,dst_ni,dst_router,enq_time\n")
for line in fft:
line_split = line.split()
time = line_split[line_split.index("time:") + 1]
router = line_split[line_split.index("SwitchAllocator") + 3]
outport = line_split[line_split.index("outport") + 1]
inport = line_split[line_split.index("inport") + 1]
packet_address = line_split[line_split.index("addr") + 2][1:-1]
packet_type = line_split[line_split.index("addr") + 7]
flit_id = line_split[line_split.index("[flit::") + 1][3:]
flit_type = line_split[line_split.index("Id="+str(flit_id)) + 1][5:]
vnet = line_split[line_split.index("Type="+str(flit_type)) + 1][5:]
vc = line_split[line_split.index("Vnet="+str(vnet)) + 1][3:]
src_ni = line_split[line_split.index("VC="+str(vc)) + 2][3:]
src_router = line_split[line_split.index("NI="+str(src_ni)) + 2][7:]
dst_ni = line_split[line_split.index("Router="+str(src_router)) + 2][3:]
dst_router = line_split[line_split.index("NI="+str(dst_ni)) + 2][7:]
enq_time = str(line_split[line_split.index("Enqueue") + 1][5:])
line_csv = time+","+router+","+outport+","+inport+","+packet_address+","+packet_type+","+flit_id+","+flit_type+","+vnet+","+vc+","+src_ni+","+src_router+","+dst_ni+","+dst_router+","+enq_time+"\n"
csv_out.write(line_csv)
print ("done")
#convert txt to csv
df = pd.read_csv("2-fft-malicious-n-0-4-m-11-csv.txt",delimiter=',')
df.to_csv('2-fft-malicious-n-0-4-m-11.csv',index=False)
#dataset
df = pd.read_csv('2-fft-malicious-n-0-4-m-11.csv')
df.shape
df.describe()
sns.distplot(df['router'], kde = False, bins=30, color='blue')
sns.distplot(df['src_router'], kde = False, bins=30, color='blue')
sns.distplot(df['dst_router'], kde = False, bins=30, color='red')
sns.distplot(df['inport'], kde = False, bins=30, color='green')
sns.distplot(df['outport'], kde = False, bins=30, color='green')
sns.distplot(df['packet_type'], kde = False, bins=30, color='red')
direction = {'Local': 0,'North': 1, 'East': 2, 'South':3,'West':4}
df = df.replace({'inport': direction, 'outport': direction})
data = {'GETS': 1,'GETX': 2,'GUX': 3,'DATA': 4, 'PUTX': 5,'PUTS': 6,'WB_ACK':7}
df = df.replace({'packet_type': data})
df['flit_id'] = df['flit_id']+1
df['flit_type'] = df['flit_type']+1
df['vnet'] = df['vnet']+1
df['vc'] = df['vc']+1
hoparr = {"0to0":0,"0to1":1,"0to2":2,"0to3":3,"0to4":1,"0to5":2,"0to6":3,"0to7":4,"0to8":2,"0to9":3,"0to10":4,"0to11":5,"0to12":3,"0to13":4,"0to14":5,"0to15":6,
"1to1":0,"1to2":1,"1to3":2,"1to4":2,"1to5":1,"1to6":2,"1to7":3,"1to8":3,"1to9":2,"1to10":3,"1to11":4,"1to12":5,"1to13":3,"1to14":4,"1to15":5,
"2to2":0,"2to3":1,"2to4":3,"2to5":2,"2to6":1,"2to7":2,"2to8":4,"2to9":3,"2to10":2,"2to11":3,"2to12":5,"2to13":4,"2to14":3,"2to15":4,
"3to3":0,"3to4":4,"3to5":3,"3to6":2,"3to7":1,"3to8":5,"3to9":4,"3to10":3,"3to11":2,"3to12":6,"3to13":5,"3to14":4,"3to15":3,
"4to4":0,"4to5":1,"4to6":2,"4to7":3,"4to8":1,"4to9":2,"4to10":3,"4to11":4,"4to12":2,"4to13":3,"4to14":4,"4to15":5,
"5to5":0,"5to6":1,"5to7":2,"5to8":2,"5to9":1,"5to10":2,"5to11":3,"5to12":3,"5to13":2,"5to14":3,"5to15":4,
"6to6":0,"6to7":1,"6to8":3,"6to9":2,"6to10":1,"6to11":2,"6to12":4,"6to13":3,"6to14":2,"6to15":3,
"7to7":0,"7to8":4,"7to9":3,"7to10":2,"7to11":1,"7to12":5,"7to13":4,"7to14":3,"7to15":2,
"8to8":0,"8to9":1,"8to10":2,"8to11":3,"8to12":1,"8to13":2,"8to14":3,"8to15":4,
"9to9":0,"9to10":1,"9to11":2,"9to12":2,"9to13":1,"9to14":2,"9to15":4,
"10to10":0,"10to11":1,"10to12":3,"10to13":2,"10to14":1,"10to15":2,
"11to11":0,"11to12":4,"11to13":3,"11to14":2,"11to15":1,
"12to12":0,"12to13":1,"12to14":2,"12to15":3,
"13to13":0,"13to14":1,"13to15":2,
"14to14":0,"14to15":1,
"15to15":0}
packarr = {}
packtime = {}
packchunk = []
hopcurrentarr = []
hoptotarr = []
hoppercentarr =[]
waitingarr = []
interval = 500
count = 0
for index, row in df.iterrows():
current_time = row["time"]
enqueue_time = row["enq_time"]
waiting_time = current_time - enqueue_time
waitingarr.append(waiting_time)
current_router = row["router"]
src_router = row["src_router"]
dst_router = row["dst_router"]
src_router_temp = src_router
if src_router_temp>dst_router:
temph = src_router_temp
src_router_temp = dst_router
dst_router = temph
hop_count_string = str(src_router_temp)+"to"+str(dst_router)
src_router_temp = src_router
hop_count = hoparr.get(hop_count_string)
if src_router_temp>current_router:
tempc = src_router_temp
src_router_temp = current_router
current_router = tempc
current_hop_string = str(src_router_temp)+"to"+str(current_router)
current_hop = hoparr.get(current_hop_string)
if(current_hop == 0 and hop_count ==0):
hop_percent = 0
else:
hop_percent = current_hop/hop_count
hoptotarr.append(hop_count)
hopcurrentarr.append(current_hop)
hoppercentarr.append(hop_percent)
if row["packet_address"] not in packarr:
packarr[row["packet_address"]] = count
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
count+=1
else:
current_time = row["time"]
position = packarr.get(row["packet_address"])
pkt_time = packtime.get(row["packet_address"])
current_max = max(packarr.values())
if (current_time-pkt_time)<interval:
packchunk.append(packarr.get(row["packet_address"]))
else:
del packarr[row["packet_address"]]
del packtime[row["packet_address"]]
packarr[row["packet_address"]] = current_max+1
packtime[row["packet_address"]] = row["time"]
packchunk.append(packarr.get(row["packet_address"]))
if (current_max)==count:
count+=2
elif (current_max+1)==count:
count+=1
df['packet_address'].nunique()
print(len(packarr))
print(len(packchunk))
df = df.assign(traversal_id=packchunk)
df = df.assign(hop_count=hoptotarr)
df = df.assign(current_hop=hopcurrentarr)
df = df.assign(hop_percentage=hoppercentarr)
df = df.assign(enqueue_time=waitingarr)
df.rename(columns={'packet_type': 'cache_coherence_type', 'time': 'timestamp'}, inplace=True)
df = df.drop(columns=['packet_address','enq_time'])
df.isnull().sum()
df.dtypes
df.to_csv('2-fft-malicious-n-0-4-m-11.csv',index=False)
```
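The `hoparr` lookup table above hard-codes pairwise hop counts for what appears to be a 4x4 mesh. As a cross-check, nearly all of its entries coincide with the Manhattan distance between router coordinates under row-major numbering with XY routing; the sketch below derives hop counts that way. The mesh topology and numbering are assumptions here, not something stated in the original pipeline.
```
# Hedged sketch: deriving hop counts from router coordinates, assuming a 4x4 mesh
# with row-major router numbering and XY (dimension-ordered) routing.
MESH_WIDTH = 4
def mesh_hops(src_router, dst_router, width=MESH_WIDTH):
    sx, sy = src_router % width, src_router // width
    dx, dy = dst_router % width, dst_router // width
    return abs(sx - dx) + abs(sy - dy) # Manhattan distance = XY-routing hop count
mesh_hops(0, 15), mesh_hops(3, 12) # both 6 on a 4x4 mesh, matching hoparr
```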
#### Router Fetch
```
def fetch(i):
df = pd.read_csv('2-fft-malicious-n-0-4-m-11.csv')
df = df.loc[df['router'] == i]
df = df.drop(columns=['router'])
df.to_csv('2-fft-malicious-n-0-4-m-11-r'+str(i)+'.csv',index=False)
df = pd.read_csv('2-fft-malicious-n-0-4-m-11-r'+str(i)+'.csv')
def timecount(df):
timearr = []
interval = 99
count = 0
for index, row in df.iterrows():
if row["timestamp"]<=interval:
count+=1
else:
timearr.append([interval+1,count])
count=1
interval+=100
timearr.append([interval+1,count])
return timearr
def maxcount(timearr,df):
countarr = []
increarr = []
maxarr = []
for i in range(len(timearr)):
for cnt in range(timearr[i][1],0,-1):
countarr.append(cnt)
maxarr.append(timearr[i][1])
increment = timearr[i][1] - cnt + 1
increarr.append(increment)
df = df.assign(packet_count_decr=countarr)
df = df.assign(packet_count_incr=increarr)
df = df.assign(max_packet_count=maxarr)
return df
df = maxcount(timecount(df),df)
def rename(df): # note: despite the name, this adds derived index features rather than renaming columns
df['traversal_id'] = df['traversal_id']+1
df["packet_count_index"] = df["packet_count_decr"]*df["packet_count_incr"]
df["packet_max_index"] = df["packet_count_index"]*df["max_packet_count"]
df["port_index"] = df["outport"]*df["inport"]
df["cache_coherence_flit_index"] = df["cache_coherence_type"]*df["flit_id"]
df["flit_index"] = df["cache_coherence_flit_index"]*df["flit_type"]
df["traversal_index"] = df["flit_index"]*df["traversal_id"]
df["cache_coherence_vnet_index"] = df["cache_coherence_type"]*df["vnet"]
df["vnet_vc_index"] = df["vnet"]*df["vc"]
df["vnet_vc_cc_index"] = df["vnet"]*df["cache_coherence_vnet_index"]
rename(df)
df['target'] = 1
print(df.shape)
df.to_csv('2-fft-malicious-n-0-4-m-11-r'+str(i)+'.csv',index=False)
for i in range (0,16):
fetch(i)
```
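For reference, the 100-cycle windowing done by `timecount`/`maxcount` can also be expressed with a pandas groupby. The cell below is only an illustrative sketch on a toy frame, not part of the original pipeline; it shows the same per-window packet counts and the `max_packet_count` idea.
```
# Illustrative sketch (not part of the pipeline): per-100-cycle packet counts via groupby.
import pandas as pd
toy = pd.DataFrame({'timestamp': [3, 40, 95, 120, 150, 420]})
window = toy['timestamp'] // 100 # 0 for 0-99, 1 for 100-199, ...
per_window_counts = toy.groupby(window)['timestamp'].count()
toy['max_packet_count'] = window.map(per_window_counts) # same idea as maxcount's maxarr
print(per_window_counts)
print(toy)
```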
# <center> COVID-19 Spread Visualisation </center>
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import networkx as nx
from ipywidgets import interactive
from tqdm.notebook import tqdm_notebook # needed by get_graph below
%matplotlib inline
def read_data(name):
result = {"cumulative_cities":[], "daily_cities":[], "total_infected":[]}
for simulation in range(10):
with open(f"{name}_{simulation}", 'rb') as f:
data = pickle.load(f)
data = pd.DataFrame(data).T
cumulative_cities = np.cumsum(data.groupby('day')['infected'].count())
daily_cities = data.groupby('day')['infected'].count()
total_infected = data['infected'].apply(np.max).sum()
result["cumulative_cities"].append(cumulative_cities)
result["daily_cities"].append(daily_cities)
result["total_infected"].append(total_infected)
return result
def plot_with_std(data, label, color):
average = data.mean()
std = data.std()
plt.plot(
average,
label=label,
color=color,
linewidth=2
)
plt.fill_between(range(len(average)), average+std, average, alpha=0.1, color=color)
plt.fill_between(range(len(average)), average-std, average, alpha=0.1, color=color)
realistic = read_data("../simulation_data/INFECTED_CITIES_realistic_3")
strict = read_data("../simulation_data/INFECTED_CITIES_strict_3")
mild = read_data("../simulation_data/INFECTED_CITIES_mild_3")
airport_df = pd.read_csv("../data/airport_df_preprocessed.csv", index_col=0)
realistic_label = 'Realistic quarantines after 30 days, 14 days to enforce (R0=4, R44=1)'
strict_label = 'Strict quarantines after 14 days, 7 days to enforce (R0=4, R21=1)'
mild_label = 'Mild quarantines after 60 days, 30 days to enforce (R0=4, R90=1)'
plt.figure(figsize=(10, 8))
plot_with_std(pd.DataFrame(realistic['cumulative_cities']),
label=realistic_label,
color='blue'
)
plot_with_std(pd.DataFrame(strict['cumulative_cities']),
label=strict_label,
color='green'
)
plot_with_std(pd.DataFrame(mild['cumulative_cities']),
label=mild_label,
color='red'
)
plt.title("Cumulative number of infected cities", fontsize=14)
plt.ylabel("Cities")
plt.xlabel("Days from the beginning of the infection")
plt.grid(alpha=0.5)
plt.legend(loc=(0.002,0.888))
plt.show()
plt.figure(figsize=(10, 8))
plot_with_std(pd.DataFrame(realistic['daily_cities']),
label='Realistic quarantines after 30 days, 20 days to enforce (R0=5, R50=1)',
color='blue'
)
plot_with_std(pd.DataFrame(strict['daily_cities']),
label='Strict quarantines after 14 days, 7 days to enforce (R0=5, R21=1)',
color='green'
)
plot_with_std(pd.DataFrame(mild['daily_cities']),
label='Mild quarantines after 60 days, 30 days to enforce (R0=5, R90=1)',
color='red'
)
plt.title("Daily number of infected cities", fontsize=14)
plt.ylabel("Cities")
plt.xlabel("Days from the beginning of the infection")
plt.grid(alpha=0.5)
plt.legend(loc=(0.002,0.888))
plt.show()
scenarios = ['Mild', 'Realistic', 'Strict']
x_pos = np.arange(len(scenarios))
averages = [
np.mean(mild['total_infected']),
np.mean(realistic['total_infected']),
np.mean(strict['total_infected']),
]
stds = [
np.std(mild['total_infected']),
np.std(realistic['total_infected']),
np.std(strict['total_infected']),
]
fig, ax = plt.subplots(figsize=(10, 6))
ax.barh(x_pos, averages, align='center', xerr=stds, alpha=0.5, ecolor='black', capsize=10)
ax.set_yticks(x_pos)
ax.set_yticklabels(scenarios)
ax.set_title('Total number of infected')
ax.yaxis.grid(True)
for i, v in enumerate(averages):
av = round(v/1000000, 1)
std = round(stds[i]/1000000, 1)
ax.text(v, i + .25, f"{av} ± {std} mln", color='black', fontweight='bold')
plt.tight_layout()
sns.despine()
plt.show()
np.array(averages)/airport_df.city_population.sum()
def get_spread_data(file_name, airport_df=airport_df):
with open(file_name, 'rb') as f:
spread_data = pickle.load(f)
spread_data = pd.DataFrame(spread_data).T
spread_data = spread_data[['from', 'day']].reset_index()
spread_data.columns = ['destination', 'source', 'day']
spread_data = spread_data[['source', 'destination', 'day']]
spread_data = pd.merge(
spread_data, airport_df[['City', 'Lat', 'Long']], left_on='source', right_on='City')
spread_data = pd.merge(
spread_data, airport_df[['City', 'Lat', 'Long']], left_on='destination', right_on='City')
spread_data.drop(['City_x', 'City_y'], axis=1, inplace=True)
spread_data.rename(columns={
'Lat_x':'lat_source',
'Long_x':'long_source',
'Lat_y':'lat_dest',
'Long_y':'long_dest'
}, inplace=True)
return spread_data
def get_graph(result_cities, airport_df=airport_df):
graph = nx.from_pandas_edgelist(
result_cities,
source = 'source',
target = 'destination',
create_using = nx.DiGraph()
)
xycoords = {}
for node in tqdm_notebook(list(graph.nodes), leave=False):
xycoords[str(node)] = (
airport_df.Long[airport_df.City ==node].values[0],
airport_df.Lat[airport_df.City == node].values[0]
)
return graph, xycoords
def visualise_spread(x, y):
graph = nx.from_pandas_edgelist(
result_cities[result_cities.day<=x],
source = 'source',
target = 'destination',
create_using = nx.DiGraph()
)
plt.figure(figsize=(30, 15))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.add_feature(cfeature.LAND, color='#3d3d3d')
ax.add_feature(cfeature.OCEAN, color='#04031c')
#ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS)
ax.outline_patch.set_visible(False)
if y=='Nodes':
nx.draw_networkx_nodes(graph,
pos=xycoords,
with_labels=False,
node_color='r',
node_size=5,
alpha=1)
else:
nx.draw_networkx(graph,
arrows=True,
pos=xycoords,
with_labels=False,
node_color='r',
edge_color='orange',
style='dashed',
arrowstyle='->',
node_size=10,
alpha=0.5)
plt.xlim(-180, 180)
plt.ylim(-60, 80)
plt.show()
```
## Realistic scenario

```
result_cities = get_spread_data("INFECTED_CITIES_realistic_2_0")
graph, xycoords = get_graph(result_cities)
interactive_plot = interactive(visualise_spread,
x=list(range(0, 100)),
y=['Nodes', 'Nodes and paths']
)
interactive_plot
```
## Optimistic (strict quarantines) scenario

```
result_cities = get_spread_data("INFECTED_CITIES_strict_2_0")
graph, xycoords = get_graph(result_cities)
interactive_plot = interactive(visualise_spread,
x=list(range(0, 100)),
y=['Nodes', 'Nodes and paths']
)
interactive_plot
```
## Pessimistic (mild quarantines) scenario

```
result_cities = get_spread_data("INFECTED_CITIES_mild_2_0")
graph, xycoords = get_graph(result_cities)
interactive_plot = interactive(visualise_spread,
x=list(range(0, 100)),
y=['Nodes', 'Nodes and paths']
)
interactive_plot
```
```
import numpy as np
import matplotlib.pyplot as plt
import pickle
with open("details/ped_dataset.pkl", 'rb') as f:
ped_dataset = pickle.load(f)
total_ped_matrix = np.load("details/total_ped_matrix.npy")
with open("details/trajectories_2d.txt", 'rb') as f:
trajectories_2d = pickle.load(f)
# initial uncertainty P0
P = np.diag([1000.0, 1000.0, 1000.0, 1000.0])
# dynamic matrix A (motion model)
dt = 0.5
A = np.matrix([[1.0, 0.0, dt, 0.0],
[0.0, 1.0, 0.0, dt],
[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
# measurement matrix H - directly measuring velocity
H = np.matrix([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0]])
# measurement covariance R - tells the filter how bad the sensor readings are
ra = 10.0**2
R = np.matrix([[ra, 0.0],
[0.0, ra]])
# process noise - impact of wind force on the acceleration (IMPORTANT?)
sv = 0.8 # have to check the value for pedestrians
G = np.matrix([[0.5*dt**2],
[0.5*dt**2],
[dt],
[dt]])
Q = G*G.T*sv**2
# index matrix
I = np.eye(4)
# only predicting from 3 seconds to 8 seconds
vel_measurements = total_ped_matrix[9800:, 6:, 2:4]
# initial state
x = np.matrix([[trajectories_2d[9800,5,0], trajectories_2d[9800,5,1],
0.0, 0.0]]).T
# Preallocation for Plotting
xt = []
yt = []
dxt= []
dyt= []
Zx = []
Zy = []
Px = []
Py = []
Pdx= []
Pdy= []
Rdx= []
Rdy= []
Kx = []
Ky = []
Kdx= []
Kdy= []
def savestates(x, Z, P, R, K):
xt.append(float(x[0]))
yt.append(float(x[1]))
dxt.append(float(x[2]))
dyt.append(float(x[3]))
Zx.append(float(Z[0]))
Zy.append(float(Z[1]))
Px.append(float(P[0,0]))
Py.append(float(P[1,1]))
Pdx.append(float(P[2,2]))
Pdy.append(float(P[3,3]))
Rdx.append(float(R[0,0]))
Rdy.append(float(R[1,1]))
Kx.append(float(K[0,0]))
Ky.append(float(K[1,0]))
Kdx.append(float(K[2,0]))
Kdy.append(float(K[3,0]))
cur_diff = np.diff(trajectories_2d[9800,6:, :], axis=0) / 0.5
cur_diff = np.vstack((cur_diff[0], cur_diff))
# kalman filter
for n in range(10):
# Time Update (Prediction)
# ========================
# Project the state ahead
x = A*x
# Project the error covariance ahead
P = A*P*A.T + Q
# Measurement Update (Correction)
# ===============================
# Compute the Kalman Gain
S = H*P*H.T + R
K = (P*H.T) * np.linalg.pinv(S)
# Update the estimate via z
Z = cur_diff[n, :].reshape(2,1)
y = Z - (H*x) # Innovation or Residual
x = x + (K*y)
# Update the error covariance
P = (I - (K*H))*P
# Save states (for Plotting)
savestates(x, Z, P, R, K)
xy_values = [[xp, yp] for xp, yp in zip(xt, yt)]
plt.plot(*zip(*np.array(trajectories_2d[9800,6:, :])))
plt.plot(*zip(*np.array(xy_values)))
def plot_K():
fig = plt.figure(figsize=(16,9))
plt.plot(range(10),Kx, label='Kalman Gain for $x$')
plt.plot(range(10),Ky, label='Kalman Gain for $y$')
plt.plot(range(10),Kdx, label='Kalman Gain for $\dot x$')
plt.plot(range(10),Kdy, label='Kalman Gain for $\dot y$')
plt.xlabel('Filter Step')
plt.ylabel('')
plt.title('Kalman Gain (the lower, the more the measurement fulfills the prediction)')
plt.legend(loc='best',prop={'size':22})
plot_K()
np.array(total_ped_matrix[9800,6:, :2])
trajectories_2d.shape
trajectories_2d[9800, :, :2]
with open("details/new_ped_details.pkl", 'rb') as f:
new_ped_details = pickle.load(f)
len(new_ped_details)
new_ped_details.keys()
new_ped_details['739041c5fc6c43b8b9ef0612e7b0b2f7']['translation']
new_ped_details['739041c5fc6c43b8b9ef0612e7b0b2f7']['velocity']
np.diff(np.array(new_ped_details['739041c5fc6c43b8b9ef0612e7b0b2f7']['translation']), axis=0) / 0.5
ped_dataset[10]['translation']
ped_dataset[10]['velocity']
np.diff(np.array(ped_dataset[10]['translation']), axis=0) / 0.5
total_ped_matrix[10,:,2:4]
```
```
from qiskit import *
from numpy.random import randint, shuffle
from qiskit.visualization import plot_histogram, plot_bloch_multivector
import numpy as np
import random # needed by QCC below (random.sample); may also be provided by the %run notebooks
%run ../Functions/BB84_functions.ipynb
%run ../Functions/LDPC_functions.ipynb
n, N =1000, 4200
if n == 648 or n == 50 :
MAX_ERROR_RATE = 1
else :
MAX_ERROR_RATE = 0.70
EMPTY = QuantumCircuit(N, N)
MAX_ITERS = 30
def QCC(qubits, N) :
#e = randint((MAX_ERROR_RATE * N) // 100 + 1)
e = int((MAX_ERROR_RATE * N) // 100) + 1
circ = qubits.copy()
rand_pos = random.sample(range(N), e)
for pos in rand_pos :
random_gate = randint(2)
if random_gate == 0:
circ.x(pos)
else :
circ.h(pos)
return circ
def CAC(bits) :
return bits
def generate_and_send(N) :
#Step 1 - Alice generates bits
alice_bits = bit_string(N)
#Step 2 - Alice generates the bases string
alice_bases = bit_string(N)
#Step 3 - Bob generates the bases string
bob_bases = bit_string(N)
#Step 4 - Alice encodes her bits in the chosen bases
alice_encode = encode_bits(alice_bits, alice_bases)
#Step 5 - Alice sends her encoded bits to Bob and Eve tries to intercept over the Quantum channel
received_encode = QCC(alice_encode, N)
#Step 6 - Bob announces that he has received the encoding and measured it over CAC
if received_encode == EMPTY :
received = False
else :
received = True
return alice_bits, alice_bases, bob_bases, received_encode, received
```
|Information|Alice|Bob|
|:-:|:-:|:-:|
|Alice's bits|Y|Noisy|
|Alice's bases|Y|N|
|Bob's bases|N|Y|
```
def sifting(alice_bases, bob_bases, received_encode) :
#Step 7 - Bob measures Alice's bits in the chosen bases
bob_circuit, bob_bits = measure_bits(received_encode, bob_bases)
#Step 8 - Alice and Bob exchange their bases over CAC
a2b = CAC(alice_bases)
b2a = CAC(bob_bases)
#Step 9 - Alice and Bob discard all the bits that correspond to disagreed bases
agreed_base_indices = agreed_bases(alice_bases, bob_bases)
#bob_circuit.draw(output = 'mpl')
return bob_bits, agreed_base_indices
```
|Information|Alice|Bob|
|:-:|:-:|:-:|
|Bob's and Alice's bases|Y|Y|
```
def qber(alice_bits, bob_bits, agreed_base_indices) :
#Step 10 - Error rate checking
S2T = bit_string(len(agreed_base_indices))
T = select_bits(agreed_base_indices, S2T, 0)
#Step 11 - Alice tells T to Bob over CAC
bob_T = CAC(T)
#Step 12 - Alice and Bob generate their test bits
alice_test_bits, bob_test_bits = [], []
for i in T :
alice_test_bits.append(alice_bits[i])
bob_test_bits.append(bob_bits[i])
#Step 12 - Alice and Bob announce their test bits to each other over the CAC
rec_bob_test_bits = CAC(bob_test_bits)
rec_alice_test_bits = CAC(alice_test_bits)
#Step 13 - Alice and Bob compute the error rate
error = error_rate(rec_alice_test_bits, rec_bob_test_bits)
return error, S2T
```
|Information|Alice|Bob|
|:-:|:-:|:-:|
|T|Y|Y|
|Alice's test bits|Y|Y|
|Bob's test bits|Y|Y|
```
def reconciliation(alice_bits, bob_bits, qber) :
p = qber
n = len(alice_bits)
#Step1 : Produce parity check matrix(M1) of dimension m*n, n is length of bitstring and m is no. of parity check equations
H, m = parity_matrix(n, p)
#Step2 : Alice produces the syndrome and hash values
C = syndrome(H, alice_bits)
#Step3 : Alice sends syndrome via CAC
received_C = CAC(C)
#Step4 : Bob produces the syndrome
D = syndrome(H, bob_bits)
#Step5 : Alice performs belief propagation algorithm
y, success, i = belief_prop(received_C, D, bob_bits, MAX_ITERS, p, H)
#Step6 : Bob sends success of reconciliation
return y, success
def privacy_amplification(alice_pseudokey, bob_pseudokey) :
n, k = len(alice_pseudokey), len(alice_pseudokey) // 2
seed = bit_string(n + k - 1)
alice_key = toeplitz(n, k, alice_pseudokey, seed)
bob_key = toeplitz(n, k, bob_pseudokey, seed)
return alice_key, bob_key, error_rate(alice_key, bob_key)
#Step 1 to Step 6
alice_bits, alice_bases, bob_bases, received_encode, received = generate_and_send(N)
if not received :
print('Abort : Did not receive qubits')
else :
#Step 7 to Step 9
bob_bits, agreed_base_indices = sifting(alice_bases, bob_bases, received_encode)
#Step 10 to Step 13
error, S2T = qber(alice_bits, bob_bits, agreed_base_indices)
print("QBER : ", error)
#Step 14 - Alice and Bob check over a threshold for error before proceeding ahead
if error > 0.11 :
print ("Abort : Error rate is too high")
else :
#Step 15 - Alice and Bob generate their pseudo keys
SminusT = select_bits(agreed_base_indices, S2T, 1)
alice_pseudokey, bob_pseudokey = [], []
for i in SminusT :
alice_pseudokey.append(alice_bits[i])
bob_pseudokey.append(bob_bits[i])
if error != 0 :
#Step 16 - Information Reconciliation
if 648 <= len(alice_pseudokey) <= 700 :
alice_pseudokey = alice_pseudokey[:648]
bob_pseudokey = bob_pseudokey[:648]
if 50 <= len(alice_pseudokey) <= 75 :
bob_corrected_key, success = reconciliation(alice_pseudokey, bob_pseudokey, error) if error != 0 else (bob_pseudokey, 1)
if not success :
print("Abort : Reconcilation not succeeded")
else :
#Step 17 - alice and bob perform privacy amplification
alice_key, bob_key, error = privacy_amplification(alice_pseudokey, bob_corrected_key)
print("Alice's key : ", alice_key)
print("Bob's key : ", bob_key)
print("Final error rate in Bob's key : ", error)
else :
#Step 17 - alice and bob perform privacy amplification
alice_key, bob_key, error = privacy_amplification(alice_pseudokey, bob_pseudokey)
print("Alice's key : ", alice_key)
print("Bob's key : ", bob_key)
print("Final error rate in Bob's key : ", error)
```
|Information|Alice|Bob|
|:-:|:-:|:-:|
|Final key|Y|Y|
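For intuition, the Toeplitz hashing used in privacy amplification compresses the n-bit reconciled key to k bits with a random Toeplitz matrix over GF(2). The cell below is only a minimal numpy sketch of that construction under the usual definition; the project's actual `toeplitz` helper comes from the `%run` function notebooks and is not shown here, so treat this as an illustration rather than the real implementation.
```
# Hedged sketch of Toeplitz-hashing privacy amplification (illustration only,
# not the toeplitz() helper loaded via %run above).
import numpy as np
def toeplitz_hash(key_bits, k, seed_bits):
    # key_bits: n reconciled key bits; seed_bits: n + k - 1 random bits defining the matrix
    n = len(key_bits)
    T = np.empty((k, n), dtype=int)
    for i in range(k):
        for j in range(n):
            T[i, j] = seed_bits[j - i + k - 1] # constant along diagonals
    return list((T @ np.array(key_bits)) % 2) # matrix-vector product over GF(2)
pseudo_key = list(np.random.randint(0, 2, 16))
seed = list(np.random.randint(0, 2, 16 + 8 - 1))
toeplitz_hash(pseudo_key, 8, seed)
```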
# Pairing crypto
## Resources:
### Software:
- Get rust at:
[www.rust-lang.org](https://www.rust-lang.org)
- Get jupyter notebook directly at [jupyter.org](https://www.jupyter.org) or through anaconda distribution at [anaconda.com](https://www.anaconda.com)
- Get the Rust Jupyter kernel at [https://github.com/google/evcxr/blob/master/evcxr_jupyter/README.md](https://github.com/google/evcxr/blob/master/evcxr_jupyter/README.md) or run the code normally
### Pairings:
- Theory on pairings
[Dan Boneh Talk - Pairings in Cryptography](https://www.youtube.com/watch?v=8WDOpzxpnTE)
- Rust pairing library
[pairing](https://crates.io/crates/pairing)
- Rust threshold signatures with pairings
[threshold_crypto](https://crates.io/crates/threshold_crypto)
# Preliminaries
## Group
Group:
$$\text{Set + Operation}$$
for example:
(Integers + Addition), (Nonzero rationals + Multiplication), (Elliptic curve points + Point addition)
## Modular arithmetic

$$9 + 8 = 6 \ \text{in}\ \mathbb{Z}_{11}$$
## Elliptic curves
[More detailed information](https://www.youtube.com/watch?v=mFVKuFZ29Fc&list=PLN9KZDpNfsHMd7d7PX87JGesGY_Qzyb3V&index=2)
### Elliptic curves
<img src="img/elliptic_curve_over_finite_field.png" width="480">
Elliptic curve:
$$ y^2 = x^3 + ax + b$$
Elliptic curve point:
$$g = (x,y)$$
### Point addition
- draw the secant line through p and q
- reflect its third intersection with the elliptic curve over the x-axis
<img src="img/point_addition.png" width="720">
$$ p + q = r$$
### Point doubling
- take the tangent at point p
- reflect its second intersection with the elliptic curve over the x-axis
<img src="img/point_doubling.png" width="720">
$$p + p = r$$
### Exponentiation
- exponentiation means repeated application of the group operation
$$g^a := \underbrace{g + g + \dots + g}_{a\ \text{times}}$$
- not to be confused with integer exponentiation, which coincides with it only when the group operation is multiplication
# Pairings
## Symmetric pairings
<img src="img/pairing1.png" width="480">
Definition: A pairing is a map $e:G\times G \rightarrow G_T$ which is:
1. Bilinear:
$$e(g^a,g^b) = e(g,g)^{ab}$$
1. Polynomial-time computable and non-degenerate
## Asymmetric pairings
Definition: A pairing is a map $e:G_1\times G_2 \rightarrow G_T$ which is:
1. Bilinear:
$$e(g_1^a,g_2^b) = e(g_1,g_2)^{ab}$$
1. Polynomial-time computable and non-degenerate
```
:dep rand = "0.4.6"
:dep bn = "0.4.3"
:dep sha2 = "0.8.0"
extern crate rand;
extern crate bn;
extern crate sha2;
use bn::{Group, Fr, G1, G2, pairing};
use sha2::{Sha512, Digest};
let mut rng = rand::thread_rng();
let a = Fr::random(&mut rng);
let b = Fr::random(&mut rng);
(pairing(G1::one()*a,G2::one()*b) == pairing(G1::one(),G2::one()).pow(a*b))
```
# Boneh-Lynn-Shacham signature scheme
- private key $p$ is random integer from $\mathbb{Z}_r$
$$ p \leftarrow \mathbb{Z}_r$$
- public key $P$ is $G_1$ curve point
$$ P = g_1^p$$
- hash message to $G_2$ curve point, [secure way](https://medium.com/cryptoadvance/bls-signatures-better-than-schnorr-5a7fe30ea716)
$$ H(m) \in G_2$$
- signature
$$S = H(m)^p$$
- verify
$$e(g_1,S) \overset{?}{=} e(P,H(m))$$
- is equal to
$$e(g_1,H(m)^p) \overset{?}{=} e(g_1^p,H(m))$$
```
//create random private key
let p = Fr::random(&mut rng);
//create corresponding public key
let P = G1::one()*p;
//hash message to G2 curve point, this is insecure version, see link in description for secure version
let mut temp: [u8;64] = [0u8;64];
let mut hasher = Sha512::new();
hasher.input("message".as_bytes());
temp.copy_from_slice(hasher.result().as_slice());
let H = G2::one()*Fr::interpret(&temp);
//create signature from Hash and private key
let S = H*p;
//check whether signature is valid
(pairing(G1::one(),S) == pairing(P,H))
```
# Signature aggregation
- private keys $p_1,p_2$ are random integers from $\mathbb{Z}_r$
$$ p_1,p_2 \leftarrow \mathbb{Z}_r$$
- public keys $P_1,P_2$ are $G_1$ curve points
$$ P_1 = g_1^{p_1}, \qquad P_2 = g_1^{p_2}$$
- hash messages $m_1,m_2$ to $G_2$ curve points, [secure way](https://medium.com/cryptoadvance/bls-signatures-better-than-schnorr-5a7fe30ea716)
$$ H(m_1),H(m_2) \in G_2$$
- signatures
$$S_1 = H(m_1)^{p_1}, \qquad S_2 = H(m_2)^{p_2}$$
- aggregate signature
$$ S = S_1 + S_2$$
- verify
$$e(g_1,S) \overset{?}{=} e(P_1,H(m_1)) e(P_2,H(m_2))$$
- is equal to
$$e(g_1,H(m_1)^{p_1} + H(m_2)^{p_2}) = e(g_1,H(m_1)^{p_1}) e(g_1, H(m_2)^{p_2}) \overset{?}{=} e(g_1^{p_1},H(m_1))e(g_1^{p_2},H(m_2))$$
```
//create random private keys
let p1 = Fr::random(&mut rng);
let p2 = Fr::random(&mut rng);
//create corresponding public keys
let P1 = G1::one()*p1;
let P2 = G1::one()*p2;
//hash messages to G2 curve points, this is insecure version, see link in description for secure version
let mut temp: [u8;64] = [0u8;64];
let mut hasher = Sha512::new();
hasher.input("message1".as_bytes());
temp.copy_from_slice(hasher.result().as_slice());
let H1 = G2::one()*Fr::interpret(&temp);
let mut hasher = Sha512::new();
hasher.input("message2".as_bytes());
temp.copy_from_slice(hasher.result().as_slice());
let H2 = G2::one()*Fr::interpret(&temp);
//create signature from Hashes and private keys
let S1 = H1*p1;
let S2 = H2*p2;
//aggregate signatures
let S = S1 + S2;
//check whether signature is valid
(pairing(G1::one(),S) == pairing(P1,H1)*pairing(P2,H2))
```
#### import libraries
```
import numpy as np
from typing import Tuple, Dict
```
#### generating data for our xnor problem
```
def generate_data(N: int) -> Tuple[np.ndarray, np.ndarray]:
"""
Original code credits to Prof. Dr. Stefan Harmeling
    Generate data for training our model.
:param N: number of samples multiplier.
:return: tuple of x and y data as numpy ndarrays.
"""
X = np.repeat(np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), N, axis=0)
X = X + np.random.randn(4 * N, 2) * 0.2
y = np.repeat([0, 1, 1, 0], N)
y = np.reshape(y, (len(y), 1))
return X, y
X_train, y_train = generate_data(N=100)
X_test, y_test = generate_data(N=50)
```
#### extra code
to plot the data we just created
```
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
plt.rcParams.update({"savefig.facecolor": (0.0, 0.0, 0.0, 0.0),})
plt.figure(figsize=(20, 15))
plt.grid(color="#474842")
plt.xticks(np.arange(-.5, 1.51, .5), fontsize=17)
plt.yticks(np.arange(-.5, 1.51, .5), fontsize=17)
ax = plt.gca()
ax.set_facecolor("#272822")
plt.title("A XNOR B", fontsize=18)
plt.xlabel("State of A", fontsize=18)
plt.ylabel("State of B", fontsize=18)
plt.scatter(X_train[:,0], X_train[:,1], c=y_train, s=50,
cmap="RdBu", vmin=-.2, vmax=1.2,
edgecolor="white", linewidth=1)
plt.savefig("xnor_problem.svg")
plt.show()
```
#### define non-linear and loss function as well as their derivatives
```
def sigmoid(X: np.ndarray) -> np.ndarray:
"""
    The sigmoid function returns values close to 0 for x << 0 and close to 1 for x >> 0.
:param X: data to transform via sigmoid function:
:return: transformed data that lies between 0 and 1.
"""
return 1 / (1 + np.exp(-X))
def sigmoid_derivative(X: np.ndarray) -> np.ndarray:
"""
The derivative of the sigmoid function.
    :param X: the data points for which the slope of the sigmoid function should be returned.
:return: the slope of the sigmoid function at every given X.
"""
sig = sigmoid(X)
return sig * (1 - sig)
def cross_entropy_loss(
h: np.ndarray, y: np.ndarray
) -> np.ndarray:
"""
Compute the cross entropy loss for the given hypothesis (h) in contrast to the true results (y).
:param h: Hypothesis of the NN to compare with y.
:param y: True results of the data.
:return: Cost/Loss of the current hypothesis.
"""
return -(1/y.size) * ((y.T @ np.log(h)) + ((1 - y.T) @ np.log(1 - h)))
def cross_entropy_derivative(
h: np.ndarray, y: np.ndarray
) -> np.ndarray:
"""
Compute the derivative of the cross entropy loss.
:param h: Hypothesis of the NN to compare with y.
:param y: True results of the data.
:return: Derivative of the cross entropy loss with the current hypothesis.
"""
    return -(y / h) + ((1 - y) / (1 - h))
```
#### putting the initialization and the backprop algorithm together
```
def fit(X: np.ndarray, y: np.ndarray) -> Tuple[Dict, Dict]:
"""
Create a simple ANN and train it on the given binary classification data.
:param X: Input data.
:param y: Output data.
:return: Weights and biases as dictionaries.
"""
weights = dict()
biases = dict()
weights[1] = 2 * np.random.random((2,3)) - 1
biases[1] = np.zeros(3)
weights[2] = 2 * np.random.random((3,1)) - 1
biases[2] = np.zeros(1)
a = dict()
z = dict()
da = dict()
dz = dict()
for iteration in range(1000):
z[1] = X @ weights[1] + biases[1]
a[1] = sigmoid(z[1])
z[2] = a[1] @ weights[2] + biases[2]
a[2] = sigmoid(z[2])
h = a[2]
loss = cross_entropy_loss(h=h, y=y)
print(f"Loss ({iteration}): {loss[0][0]}")
da[2] = cross_entropy_derivative(h=h, y=y)
dz[2] = da[2] * sigmoid_derivative(z[2])
da[1] = dz[2] @ weights[2].T
dz[1] = da[1] * sigmoid_derivative(z[1])
weights[1] -= (X.T @ dz[1]) * (1 / y.size)
biases[1] -= np.mean(dz[1], axis=0)
weights[2] -= (a[1].T @ dz[2]) * (1 / y.size)
biases[2] -= np.mean(dz[2], axis=0)
return weights, biases
```
#### run the training
```
w, b = fit(X=X_train, y=y_train)
```
#### function to make predictions on trained neural network
```
def predict(w: Dict, b: Dict, X: np.ndarray) -> np.ndarray:
"""
Use the given weights (w) and biases (b) to make a prediction for the given input data (X).
:param w: Dictionary of weight matrices.
:param b: Dictionary of bias vectors.
:param X: Input data to make predictions on.
:return: An numpy array of predictions for every sample in X.
"""
z = dict()
a = dict()
z[1] = X @ w[1] + b[1]
a[1] = sigmoid(z[1])
z[2] = a[1] @ w[2] + b[2]
a[2] = sigmoid(z[2])
return a[2]
```
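As a quick usage example (an illustrative addition, not part of the original notebook), the trained network can be scored on the holdout data by thresholding the predicted probabilities at 0.5:
```
# Convert predicted probabilities into hard 0/1 labels and compare with the true labels.
preds = (predict(w, b, X_test) > 0.5).astype(int)
accuracy = np.mean(preds == y_test)
print(f"Holdout accuracy: {accuracy:.3f}")
```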
#### extra code
Create a grid over the X and Y axes, make a prediction for each grid point, and visualize the result.
```
X = X_test
xx, yy = np.mgrid[1.1*min(X[:,0]):1.1*max(X[:,0]):0.1, 1.1*min(X[:,1]):1.1*max(X[:,1]):0.1]
grid = np.c_[xx.ravel(), yy.ravel()]
# Find the probabilities for each combination of features
probs = predict(w, b, grid).reshape(xx.shape)
f, ax = plt.subplots(figsize=(20, 15))
# Create contour lines for each set of probabilities
contour = ax.contourf(xx, yy, probs, 25, cmap="RdBu", vmin=0, vmax=1)
plt.xticks(np.arange(-.5, 1.51, .5), fontsize=17)
plt.yticks(np.arange(-.5, 1.51, .5), fontsize=17)
plt.title("A XNOR B", fontsize=18)
plt.xlabel("A", fontsize=18)
plt.ylabel("B", fontsize=18)
ax_c = f.colorbar(contour)
ax.scatter(X_test[:,0], X_test[:, 1], c=y_test[:,0], s=50,
cmap="RdBu", vmin=-.2, vmax=1.2,
edgecolor="white", linewidth=1)
ax.set(aspect="equal")
plt.savefig("xnor_decision_boundaries.svg")
plt.show()
```
<a href="https://colab.research.google.com/github/nsi319/Reformers/blob/main/reformer_pytorch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
!nvidia-smi
!pip install -U reformer_pytorch
!pip install -U transformers
!wget http://www.gutenberg.org/files/2554/2554-0.txt
import torch
from reformer_pytorch import ReformerEncDec
from datetime import datetime
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# text = """On March 2, 2018, the Securities and Exchange Commission announced securities fraud charges against a U.K.-based broker-dealer and its investment manager in connection with manipulative trading in the securities of HD View 360 Inc., a U.S.-based microcap issuer. The SEC also announced charges against HD View's CEO, another individual, and three entities they control for manipulating HD View's securities as well as the securities of another microcap issuer, West Coast Ventures Group Corp. The SEC further announced the institution of an order suspending trading in the securities of HD View.These charges arise in part from an undercover operation by the Federal Bureau of Investigation, which also resulted in related criminal prosecutions against these defendants by the Office of the United States Attorney for the Eastern District of New York.In a complaint filed in the U.S. District Court for the Eastern District of New York, the SEC alleges that Beaufort Securities Ltd. and Peter Kyriacou, an investment manager at Beaufort, manipulated the market for HD View's common stock. The scheme involved an undercover FBI agent who described his business as manipulating U.S. stocks through pump-and-dump schemes. Kyriacou and the agent discussed depositing large blocks of microcap stock in Beaufort accounts, driving up the price of the stock through promotions, manipulating the stock's price and volume through matched trades, and then selling the shares for a large profit.The SEC's complaint against Beaufort and Kyriacou alleges that they:opened brokerage accounts for the undercover agent in the names of nominees in order to conceal his identity and his connection to the anticipated trading activity in the accounts suggested that the undercover agent could create the false appearance that HD View's stock was liquid in advance of a pump-and-dump by "gam[ing] the market" through matched trades executed multiple purchase orders of HD View shares with the understanding that Beaufort's client had arranged for an associate to simultaneously offer an equivalent number of shares at the same priceA second complaint filed by the SEC in the U.S. District Court for the Eastern District of New York alleges that in a series of recorded telephone conversations with the undercover agent, HD View CEO Dennis Mancino and William T. Hirschy agreed to manipulate HD View's common stock by using the agent's network of brokers to generate fraudulent retail demand for the stock in exchange for a kickback from the trading proceeds. According to the complaint, the three men agreed that Mancino and Hirschy would manipulate HD View stock to a higher price before using the agent's brokers to liquidate their positions at an artificially inflated price. The SEC's complaint also alleges that Mancino and Hirschy executed a "test trade" on Jan. 31, 2018, coordinated by the agent, consisting of a sell order placed by the defendants filled by an opposing purchase order placed by a broker into an account at Beaufort. Unbeknownst to Mancino and Hirschy, the Beaufort account used for this trade was a nominal account that was opened and funded by the agent. 
The SEC's complaint also alleges that, prior to their contact with the undercover agent, Mancino and Hirschy manipulated the market for HD View and for West Coast by using brokerage accounts that they owned, controlled, or were associated with –including TJM Investments Inc., DJK Investments 10 Inc., WT Consulting Group LLC – to effect manipulative "matched trades."The SEC's complaint against Beaufort and Kyriacou charges the defendants with violating Section 10(b) of the Securities Exchange Act of 1934 and Rule 10b-5 thereunder. The SEC also charged Hirschy, Mancino, and their corporate entities with violating Section 17(a)(1) of the Securities Act of 1933, Sections 9(a)(1), 9(a)(2), and 10(b) of the Exchange Act and Rules 10b-5(a) and (c) thereunder. The SEC is seeking injunctions, disgorgement, prejudgment interest, penalties, and penny stock bars from Beaufort and Kyriacou. With respect to Hirschy, Mancino, and their corporate entities, the SEC is seeking injunctions, disgorgement, prejudgment interest, penalties, penny stock bars, and an officer-and-director bar against Mancino.The investigation was conducted in the SEC's New York Regional Office by Tejal Shah and Joseph Darragh, Lorraine Collazo, and Michael D. Paley of the Microcap Fraud Task Force and supervised by Lara S. Mehraban, and in Washington, D.C. by Patrick L. Feeney, Robert Nesbitt, and Kevin Guerrero, and supervised by Antonia Chion. Preethi Krishnamurthy and Ms. Shah will lead the SEC's litigation against Beaufort and Kyriacou. Ann H. Petalas and Mr. Feeney, under the supervision of Cheryl Crumpton, will handle the SEC's litigation against Mancino, Hirschy, and their entities. The SEC appreciates the assistance of the Office of the United States Attorney for the Eastern District of New York, the Federal Bureau of Investigation, the Internal Revenue Service, the Alberta Securities Commission, the Ontario Securities Commission, the Financial Conduct Authority of the United Kingdom, and the Financial Industry Regulatory Authority.The Commission's investigation in this matter is continuing."""
# summ = """On March 2, 2018, the Securities and Exchange Commission charged Beaufort Securities Ltd. and Peter Kyriacou, an investment manager at Beaufort, with manipulating the market for HD View 360 Inc., a U.S.-based microcap issuer. The SEC also announced charges against HD View's CEO, another individual, and three entities they control for manipulating HD View through pump-and-dump schemes. According to the SEC's complaint, the defendants discussed depositing large blocks of microcap stock in Beaufort accounts, driving up the price of the stock through promotions, manipulating the stock's price and volume through matched trades, and then selling the shares for a large profit. In a parallel action, the United States Attorney's Office for the Eastern District of New York announced criminal charges against the defendants. On March 4, the SEC announced the entry of an order suspending trading in the securities of HD View and for West Coast, pending the outcome of a parallel criminal action by the Federal Bureau of Investigation. Following the announcement of the suspension, HD View stock prices and volume increased significantly, and the defendants agreed to pay over $1.5 million in disgorgement, prejudgment interest, penalties, and an officer and director bar. Beaufort agreed to settle the charges without admitting or denying the allegations of the complaint, and to pay a $1 million civil penalty. The SEC's investigation, which is continuing, has been conducted by Patrick McCluskey and Cheryl Crumpton of the SEC Enforcement Division's Market Abuse Unit in the New York Regional Office. The SEC appreciates the assistance of the Financial Industry Regulatory Authority of the United Kingdom, the Canadian Securities Commission, the Alberta Securities Commission and the Ontario Securities Commission."""
text = """He had shown signs of some obscure nervous disease before his arrest and this now developed into violent attacks of epilepsy, from which he suffered for the rest of his life. The fits occurred three or four times a year and were more frequent in periods of great strain. In 1859 he was allowed to return to Russia. He started a journal--“Vremya,” which was forbidden by the Censorship through a misunderstanding. In 1864 he lost his first wife and his brother Mihail. He was in terrible poverty, yet he took upon himself the payment of his brother’s debts. He started another journal--“The Epoch,” which within a few months was also prohibited. He was weighed down by debt, his brother’s family was dependent on him, he was forced to write at heart-breaking speed, and is said never to have corrected his work. The later years of his life were much softened by the tenderness and devotion of his second wife."""
summ = """In 1864 he lost his first wife and his brother Mihail. He was in terrible poverty, yet he took upon himself the payment of his brother’s debts. He was weighed down by debt, his brother’s family was dependent on him, he was forced to write at heart-breaking speed, and is said never to have corrected his work."""
CUDA_LAUNCH_BLOCKING=1
%env CUDA_LAUNCH_BLOCKING=1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
input_ids = tokenizer.encode(text, return_tensors='pt').long().cuda()
output_ids = tokenizer.encode(summ, return_tensors="pt").long().cuda()
print(input_ids)
print("Input shape: ", input_ids.shape)
print("Output shape: ", output_ids.shape)
t1 = datetime.now()
print("Start Time: ", t1)
DE_SEQ_LEN = 2048
EN_SEQ_LEN = 2048
enc_dec = ReformerEncDec(
dim = 512,
enc_num_tokens = 60000,
enc_depth = 4,
enc_max_seq_len = DE_SEQ_LEN,
dec_num_tokens = 60000,
dec_depth = 4,
dec_max_seq_len = EN_SEQ_LEN
).cuda()
t2 = datetime.now()
print("Time taken to load model (in seconds): ",(t2-t1).total_seconds())
# train_seq_in = torch.randint(0, 64000, (1, DE_SEQ_LEN)).long().cuda()
# train_seq_out = torch.randint(0, 64000, (1, EN_SEQ_LEN)).long().cuda()
train_seq_in = input_ids
train_seq_out = output_ids
input_mask = torch.ones(1, DE_SEQ_LEN).bool().cuda()
loss = enc_dec(train_seq_in, train_seq_out, return_loss = True, enc_input_mask = input_mask)
print("Loss:",loss)
loss.backward()
# learn
t3 = datetime.now()
print("Time taken to train the model (in seconds): ",(t3-t2).total_seconds())
# evaluate with the following
eval_seq_in = torch.randint(0, 310, (1, DE_SEQ_LEN)).long().cuda()
eval_seq_out_start = torch.tensor([[0.]]).long().cuda() # assume 0 is id of start token
samples = enc_dec.generate(input_ids, eval_seq_out_start, seq_len = EN_SEQ_LEN, eos_token = 1) # assume 1 is id of stop token
print(samples.shape) # (1, <= 1024) decode the tokens
print("generated output: ", samples)
summary = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in samples][0]
print("Decoded text: ")
print(summary)
t4 = datetime.now()
print("Time taken to generate (in seconds): ",(t4-t3).total_seconds())
!pip install sentencepiece
from transformers import AutoTokenizer, AutoModelWithLMHead
tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
print(len(tokenizer))
print(tokenizer.all_special_ids)
print(tokenizer.all_special_tokens_extended)
print(tokenizer.encode("Hello I am naren. This is a new naren. hhh"))
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from datetime import datetime
# constants
NUM_BATCHES = int(100)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 10
GENERATE_EVERY = 20
GENERATE_LENGTH = 300
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = 1024,
num_tokens = len(tokenizer),
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare the Crime and Punishment text data (downloaded above as 2554-0.txt)
with open('/content/2554-0.txt') as file:
data = file.read()
X = tokenizer.encode(data, return_tensors="pt")[0]
print("\n")
print(X)
print(len(X))
# slice the token tensor directly so the train/val splits remain torch tensors
trX, vaX = X[:30000], X[30000:]
data_train, data_val = trX, vaX
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
    def __getitem__(self, index):
        # sample a random window of seq_len + 1 tokens so items differ between calls
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,)).item()
        full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
        return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'\n training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'\nvalidation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
# prime = tokenizer.decode(inp, skip_special_tokens=True, clean_up_tokenization_spaces=False)
# print(f'\n', (prime))
t1 = datetime.now()
sample = model.generate(inp, GENERATE_LENGTH)
t2 = datetime.now()
print("\nTime taken to generate for {} is {} seconds".format(i,t2-t1))
output_str = tokenizer.decode(sample, skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("\n Generated sample: ", output_str)
text = "A few months later"
input_ids = tokenizer.encode(text, return_tensors="pt").cuda()
t1 = datetime.now()
output_ids = model.generate(input_ids,100)
t2 = datetime.now()
print("\nTime taken to generate is {} seconds".format(t2-t1))
print(output_ids)
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("Generated sample: ", output_str)
google = AutoModelWithLMHead.from_pretrained("google/reformer-crime-and-punishment")
google_tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
print(google_tokenizer.decode(google.generate(google_tokenizer.encode("A few months later", return_tensors="pt"), do_sample=True,temperature=0.7, max_length=100)[0]))
text = "A few months later"
input_ids = google_tokenizer.encode(text, return_tensors="pt").cuda()
t1 = datetime.now()
output_ids = model.generate(input_ids,100)
t2 = datetime.now()
print("\nTime taken to generate is {} seconds".format(t2-t1))
print(output_ids)
output_str = google_tokenizer.decode(output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("Generated sample: ", output_str)
```
# Lab Work No. 4
# Generation of Stationary Random Processes
```
import numpy as np
import matplotlib.pyplot as plt
import random
f"Вариант {(75350035 + 2) % 3 + 1}"
```
Implement a program that generates a stationary random process with a given autocorrelation function $ K(\tau) $. Generate one or more realizations of the process and carry out an appropriate (correct) study of its various characteristics. Where possible, illustrate the results with plots.
## Autocorrelation function
$$ K(\tau) = D e^{-\alpha |\tau|} (1 + \alpha |\tau|) $$
To generate a random process with this autocorrelation function, we will use the **shaping filter method**.
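Before building the filter, it helps to visualize the target autocorrelation function itself. A minimal illustrative sketch, not part of the original notebook, assuming $D = 1$ and the value $\alpha = 3$ used below:
```
import numpy as np
import matplotlib.pyplot as plt

# Target autocorrelation K(tau) = D * exp(-alpha*|tau|) * (1 + alpha*|tau|)
D, alpha = 1.0, 3.0          # D = 1 is an assumption; alpha = 3 matches the value used below
tau = np.linspace(-3, 3, 400)
K = D * np.exp(-alpha * np.abs(tau)) * (1 + alpha * np.abs(tau))

plt.plot(tau, K)
plt.xlabel(r"$\tau$")
plt.ylabel(r"$K(\tau)$")
plt.title("Target autocorrelation function")
plt.show()
```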
```
def get_x_sample_values(amount=100):
expectation = 0
sigma = 1
return np.random.normal(expectation, sigma, size=amount)
x = get_x_sample_values()
plt.plot(x)
plt.show()
alpha = 3
alpha
t0 = 0
t1 = 10
delta_t = 0.1
time_points = np.arange(t0, t1, delta_t)
def Y(j, alpha, delta_t, x):
return np.sqrt(delta_t) * 2 * (alpha ** 1.5) * sum(i * delta_t * np.exp(-alpha * i * delta_t) * x[j - i] for i in range(len(time_points)))
def get_Y_samples(samles_amount, one_sample_amount):
res = []
for _ in range(samles_amount):
x = get_x_sample_values(one_sample_amount)
y = [Y(j, alpha, delta_t, x) for j in range(one_sample_amount)]
res.append(y)
return res
y_samples = get_Y_samples(samles_amount=20, one_sample_amount=len(time_points))
for y in y_samples:
plt.plot(y)
def expected_value(t, samples):
return sum(s[t] for s in samples) / len(samples)
def expected_value_for_time(time, samples):
return sum(expected_value(i, samples) for i in range(len(time))) / len(time)
print(f"Теоретическое мат ожидание: {0}")
print(f"Практическое мат ожидание: {expected_value_for_time(time_points, y_samples)}")
def variance(t, samples):
exp = expected_value(t, samples)
return sum(s[t]**2 - exp**2 for s in samples) / len(samples)
def variance_value_for_time(time, samples):
    return sum(variance(i, samples) for i in range(len(time))) / len(time)
print(f"Теоретическая дисперсия: {1}")
print(f"Практическая дисперсия: {variance_value_for_time(time_points, y_samples)}")
def autocorreletion_coefficient(t1, t2, samples):
exp1 = expected_value(t1, samples)
exp2 = expected_value(t2, samples)
return sum((s[t1]-exp1) * (s[t2]-exp2) for s in samples) / (len(samples)-1)
def autocorreletion_function_value(time_points, samples):
time_indicies = [i for i in range(len(time_points))]
return sum(autocorreletion_coefficient(i, j, samples) for i, j in zip(time_indicies[:-1], time_indicies[1:])) / len(time_indicies)
def auto(alpha, delta_t):
return np.exp(-alpha * delta_t) * (1 + alpha * delta_t)
print(f"Теоретическая автокорреляция: {auto(alpha, delta_t)}")
print(f"Практическая автокорреляция: {autocorreletion_function_value(time_points, y_samples)}")
```
# Building a Fraud Prediction Model with blocktorch
In this demo, we will build an optimized fraud prediction model using blocktorch. To optimize the pipeline, we will set up an objective function to minimize the percentage of total transaction value lost to fraud. At the end of this demo, we also show you how introducing the right objective during training results in a much better model than using a generic machine learning metric like AUC.
```
import blocktorch
from blocktorch import AutoMLSearch
from blocktorch.objectives import FraudCost
```
## Configure "Cost of Fraud"
To optimize the pipelines toward the specific business needs of this model, we can set our own assumptions for the cost of fraud. These parameters are
* `retry_percentage` - what percentage of customers will retry a transaction if it is declined?
* `interchange_fee` - how much of each successful transaction do you collect?
* `fraud_payout_percentage` - the percentage of fraud you will be unable to collect
* `amount_col` - the column in the data that represents the transaction amount
Using these parameters, blocktorch will attempt to build a pipeline that minimizes the financial loss due to fraud.
```
fraud_objective = FraudCost(retry_percentage=.5,
interchange_fee=.02,
fraud_payout_percentage=.75,
amount_col='amount')
```
## Search for best pipeline
In order to validate the results of the pipeline creation and optimization process, we will save some of our data as the holdout set.
```
X, y = blocktorch.demos.load_fraud(n_rows=5000)
```
blocktorch natively supports one-hot encoding. Here we keep 1 out of the 6 categorical columns to decrease computation time.
```
cols_to_drop = ['datetime', 'expiration_date', 'country', 'region', 'provider']
for col in cols_to_drop:
X.ww.pop(col)
X_train, X_holdout, y_train, y_holdout = blocktorch.preprocessing.split_data(X, y, problem_type='binary', test_size=0.2, random_seed=0)
X.ww
```
Because the fraud labels are binary, we will use `AutoMLSearch(X_train=X_train, y_train=y_train, problem_type='binary')`. When we call `.search()`, the search for the best pipeline will begin.
```
automl = AutoMLSearch(X_train=X_train, y_train=y_train,
problem_type='binary',
objective=fraud_objective,
additional_objectives=['auc', 'f1', 'precision'],
allowed_model_families=["random_forest", "linear_model"],
max_batches=1,
optimize_thresholds=True,
verbose=True)
automl.search()
```
### View rankings and select pipelines
Once the fitting process is done, we can see all of the pipelines that were searched, ranked by their score on the fraud detection objective we defined.
```
automl.rankings
```
To select the best pipeline we can call `automl.best_pipeline`.
```
best_pipeline = automl.best_pipeline
```
### Describe pipelines
We can get more details about any pipeline created during the search process, including how it performed on other objective functions, by calling the `describe_pipeline` method and passing the `id` of the pipeline of interest.
```
automl.describe_pipeline(automl.rankings.iloc[1]["id"])
```
## Evaluate on holdout data
Finally, since the best pipeline is already trained, we evaluate it on the holdout data.
Now, we can score the pipeline on the holdout data using both our fraud cost objective and the AUC (Area under the ROC Curve) objective.
```
best_pipeline.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
```
## Why optimize for a problem-specific objective?
To demonstrate the importance of optimizing for the right objective, let's search for another pipeline using AUC, a common machine learning metric. After that, we will score the holdout data using the fraud cost objective to see how the best pipelines compare.
```
automl_auc = AutoMLSearch(X_train=X_train, y_train=y_train,
problem_type='binary',
objective='auc',
additional_objectives=['f1', 'precision'],
max_batches=1,
allowed_model_families=["random_forest", "linear_model"],
optimize_thresholds=True,
verbose=True)
automl_auc.search()
```
Like before, we can look at the rankings of all of the pipelines searched and pick the best pipeline.
```
automl_auc.rankings
best_pipeline_auc = automl_auc.best_pipeline
# get the fraud score on holdout data
best_pipeline_auc.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
# fraud score on fraud optimized again
best_pipeline.score(X_holdout, y_holdout, objectives=["auc", fraud_objective])
```
When we optimize for AUC, we can see that the AUC score from this pipeline performs better compared to the AUC score from the pipeline optimized for fraud cost; however, the losses due to fraud are a much larger percentage of the total transaction amount when optimized for AUC and much smaller when optimized for fraud cost. As a result, we lose a noticeable percentage of the total transaction amount by not optimizing for fraud cost specifically.
Optimizing for AUC does not take into account the user-specified `retry_percentage`, `interchange_fee`, `fraud_payout_percentage` values, which could explain the decrease in fraud performance. Thus, the best pipelines may produce the highest AUC but may not actually reduce the amount lost due to your specific type of fraud.
This example highlights how performance in the real world can diverge greatly from machine learning metrics.
# Starbucks Capstone Challenge
### Introduction
This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks.
Not all users receive the same offer, and that is the challenge to solve with this data set.
Your task is to combine transaction, demographic and offer data to determine which demographic groups respond best to which offer type. This data set is a simplified version of the real Starbucks app because the underlying simulator only has one product whereas Starbucks actually sells dozens of products.
Every offer has a validity period before the offer expires. As an example, a BOGO offer might be valid for only 5 days. You'll see in the data set that informational offers have a validity period even though these ads are merely providing information about a product; for example, if an informational offer has 7 days of validity, you can assume the customer is feeling the influence of the offer for 7 days after receiving the advertisement.
You'll be given transactional data showing user purchases made on the app including the timestamp of purchase and the amount of money spent on a purchase. This transactional data also has a record for each offer that a user receives as well as a record for when a user actually views the offer. There are also records for when a user completes an offer.
Keep in mind as well that someone using the app might make a purchase through the app without having received an offer or seen an offer.
### Example
To give an example, a user could receive a "buy 10 dollars, get 2 dollars off" discount offer on Monday. The offer is valid for 10 days from receipt. If the customer accumulates at least 10 dollars in purchases during the validity period, the customer completes the offer.
However, there are a few things to watch out for in this data set. Customers do not opt into the offers that they receive; in other words, a user can receive an offer, never actually view the offer, and still complete the offer. For example, a user might receive the "buy 10 dollars get 2 dollars off offer", but the user never opens the offer during the 10 day validity period. The customer spends 15 dollars during those ten days. There will be an offer completion record in the data set; however, the customer was not influenced by the offer because the customer never viewed the offer.
### Cleaning
This makes data cleaning especially important and tricky.
You'll also want to take into account that some demographic groups will make purchases even if they don't receive an offer. From a business perspective, if a customer is going to make a 10 dollar purchase without an offer anyway, you wouldn't want to send a buy 10 dollars get 2 dollars off offer. You'll want to try to assess what a certain demographic group will buy when not receiving any offers.
### Final Advice
Because this is a capstone project, you are free to analyze the data any way you see fit. For example, you could build a machine learning model that predicts how much someone will spend based on demographics and offer type. Or you could build a model that predicts whether or not someone will respond to an offer. Or, you don't need to build a machine learning model at all. You could develop a set of heuristics that determine what offer you should send to each customer (i.e., 75 percent of women customers who were 35 years old responded to offer A vs 40 percent from the same demographic to offer B, so send offer A).
# Data Sets
The data is contained in three files:
* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
* profile.json - demographic data for each customer
* transcript.json - records for transactions, offers received, offers viewed, and offers completed
Here is the schema and explanation of each variable in the files:
**portfolio.json**
* id (string) - offer id
* offer_type (string) - type of offer ie BOGO, discount, informational
* difficulty (int) - minimum required spend to complete an offer
* reward (int) - reward given for completing an offer
* duration (int) - time for offer to be open, in days
* channels (list of strings)
**profile.json**
* age (int) - age of the customer
* became_member_on (int) - date when customer created an app account
* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
* id (str) - customer id
* income (float) - customer's income
**transcript.json**
* event (str) - record description (ie transaction, offer received, offer viewed, etc.)
* person (str) - customer id
* time (int) - time in hours since start of test. The data begins at time t=0
* value - (dict of strings) - either an offer id or transaction amount depending on the record
**Note:** If you are using the workspace, you will need to go to the terminal and run the command `conda update pandas` before reading in the files. This is because the version of pandas in the workspace cannot read in the transcript.json file correctly, but the newest version of pandas can. You can access the terminal from the orange icon in the top left of this notebook.
You can see how to access the terminal and how the install works using the two images below. First you need to access the terminal:
<img src="pic1.png"/>
Then you will want to run the above command:
<img src="pic2.png"/>
Finally, when you enter back into the notebook (use the jupyter icon again), you should be able to run the below cell without any errors.
## 0. Problem Statement
For this Starbucks capstone project, I want to see what factors influence whether a customer completes the Buy One Get One Free (BOGO) offer. Some people might be incentivized after viewing the offer information; some might complete the offer due to certain characteristics other than the offer.
```
import pandas as pd
import numpy as np
import math
import json
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from datetime import datetime
# read in the json files
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
```
## 1. Data Exploration & Visualization & Preprocessing
### 1.1. portfolio - containing offer ids and meta data about each offer (duration, type, etc.)
```
portfolio.head()
portfolio.describe()
# There are 10 different offers
portfolio.shape
```
Based on the bar chart **Average Difficulty by Offer Type**, we know the informational offer type is the easiest offer since the minimum required spend to complete it is \\$0. On the other hand, the discount offer type on average requires roughly \\$11.75 to complete the offer.
```
portfolio.groupby(['offer_type']).mean()['difficulty'].plot.bar()
plt.ylabel('difficulty')
plt.xlabel('offer type')
plt.title('Average Difficulty by Offer Type')
plt.show()
```
Based on the chart **Average Duration by Offer Type**, we know Starbucks usually runs discount offer type for a longer period (8.5 days) than others.
```
portfolio.groupby(['offer_type']).mean()['duration'].plot.bar()
plt.xlabel('offer type')
plt.ylabel('duration')
plt.title('Average Duration by Offer Type')
plt.show()
```
Based on the chart **Average Reward by Offer Type**, we know bogo offer type has the highest dollar amount rewards (\\$7.5) compared to others.
```
portfolio.groupby(['offer_type']).mean()['reward'].plot.bar()
plt.xlabel('offer type')
plt.ylabel('reward')
plt.title('Average Reward by Offer Type')
plt.show()
```
Note, for data preprocessing, we need to divide the list values in the **channels** column into separate columns as 4 dummy variables - email, mobile, social, and web.
```
# Convert list-like elements in channels column into 4 separate columns - email, mobile, social, and web.
dummy_channels = pd.get_dummies(portfolio['channels'].apply(pd.Series).stack()).sum(level=0)
portfolio = pd.merge(portfolio, dummy_channels, left_index=True, right_index=True).drop(columns=['channels'])
portfolio
# Rename id to offer id for joining dataframes later
portfolio.rename(columns={'id':'offer id'}, inplace=True)
```
### 1.2. profile - demographic data for each customer
```
profile.head()
profile.shape
# Percentage of missing values across variables
profile.isnull().sum()/len(profile)
```
Based on the above summary, `gender` and `income` have the exact same percentage of missing values, which might indicate a group of people who don't want to share private information or simply errors in the database. To investigate further, I extracted the data points where `gender` is missing.
```
gender_missing = profile[profile['gender'].isnull()]
```
All `income` values are missing for the data points where `gender` information is also unavailable.
```
gender_missing.isnull().sum()/len(gender_missing)
```
All data points with `gender` and `income` missing have `age` of 118, confirming these data points are erroneous user profiles. Therefore, for data preprocessing, I'll drop any data points with `gender` and `income` missing.
```
gender_missing['age'].value_counts()
# drop any data points with gender and income as missing values
profile.dropna(subset=['gender', 'income'], inplace=True)
# After dropping records with missing values, we have 14,825 customer profiles left
len(profile['id'].unique())
```
Note that column `became_member_on` is stored as an int rather than a datetime, so we need to convert the data type.
```
profile.info()
# Convert became_member_on from int to datetime
profile['became_member_on'] = pd.to_datetime(profile['became_member_on'], format='%Y%m%d')
```
According to **Customer Age Distribution** histogram, we can see that most of the customers' age ranges from 50 to 70 years old.
```
plt.hist(profile['age'], bins=25)
plt.title('Customer Age Distribution')
plt.xlabel('Age')
plt.ylabel('Count')
plt.show()
```
Breaking down the age distribution by gender, we can see that the age distributions across genders are fairly similar, although male customers have a noticeably larger share of people aged 20 to 30.
```
profile['gender'].unique()
sns.kdeplot(profile[profile['gender'] == 'F']['age'], label = 'Female')
sns.kdeplot(profile[profile['gender'] == 'M']['age'], label = 'Male')
sns.kdeplot(profile[profile['gender'] == 'O']['age'], label = 'Other')
plt.xlabel('Age')
plt.ylabel('Density')
plt.title('Age Distribution by Gender')
plt.show()
```
Based on **Customer Income Distribution** chart, we know that it's a right-skewed distribution, meaning the mean (\\$65,405) is greater than the median (\\$64,000).
```
plt.hist(profile['income'], bins=25)
plt.title('Customer Income Distribution')
plt.xlabel('Income')
plt.ylabel('Count')
plt.show()
print("Maximum income: ${:,.0f}".format(profile['income'].max()))
print("Minimum income: ${:,.0f}".format(profile['income'].min()))
print("Average income: ${:,.0f}".format(profile['income'].mean()))
print("Median income: ${:,.0f}".format(profile['income'].median()))
```
When breaking down the income distribution by gender, we can see that the income of female customers is relatively normally distributed. Most of the female customers have incomes ranging from \\$70k to \\$85k. As for male customers, their incomes are mostly concentrated between \\$50k and \\$80k. For customers of other gender, incomes mostly range from \\$55k to \\$70k. Also, there are more female customers with income above \\$80k than male and other customers.
```
sns.kdeplot(profile[profile['gender'] == 'F']['income'], label = 'Female')
sns.kdeplot(profile[profile['gender'] == 'M']['income'], label = 'Male')
sns.kdeplot(profile[profile['gender'] == 'O']['income'], label = 'Other')
plt.xlabel('Income')
plt.ylabel('Density')
plt.title('Income Distribution by Gender')
plt.show()
profile.columns
# Rename id to customer id for joining dataframes later
profile.rename(columns={'id':'customer id'}, inplace=True)
```
### 1.3. transcript - records for transactions, offers received, offers viewed, and offers completed
```
transcript.head()
transcript.shape
# Rename person to customer id for joining dataframes later
transcript.rename(columns={'person':'customer id'}, inplace=True)
transcript['event'].value_counts()
```
For data preprocessing, I'll split the dict value column `value` into separate columns - `offer id`, `reward`, and `amount`.
```
value_split = transcript['value'].apply(pd.Series)
```
After the splitting of column `value`, we can see that there are duplicated offer id columns - `offer id` and `offer_id`. To be consistent, I'm going to combine these 2 columns into `offer id`.
```
value_split.head(3)
value_split.loc[((value_split['offer id'].isnull()) &
(value_split['offer_id'].notnull())), 'offer id'] = value_split['offer_id']
value_split.drop(columns=['offer_id'], inplace=True)
transcript = pd.concat([transcript, value_split], axis=1).drop(columns=['value'])
```
### 1.4. Combine the 3 dataframes and perform further data preprocessing
```
df = pd.merge(transcript, portfolio, how='left', on='offer id')
df = pd.merge(df, profile, how='left', on='customer id')
df.head(3)
```
Under the project scope, we only care about the people who completed or were given the BOGO offer. Therefore, I will filter for records with `offer_type` as **bogo**.
```
df_bogo = df[df['offer_type'] == 'bogo']
df_bogo.shape
df_bogo['event'].value_counts()
```
There are 4 scenarios we need to capture:
1. people who didn't view but completed the offer
2. people who viewed the offer and completed the offer
3. people who viewed the offer but didn't complete the offer
4. people who didn't view and didn't complete the offer
```
bogo_r = df_bogo[df_bogo['event'] == 'offer received'].reset_index(drop=True)
bogo_c = df_bogo[df_bogo['event'] == 'offer completed'].reset_index(drop=True)
bogo_v = df_bogo[df_bogo['event'] == 'offer viewed'].reset_index(drop=True)
# There are 984 customers who received the offer but didn't view the information
len(set(bogo_r['customer id'].unique()) - set(bogo_v['customer id'].unique()))
# There are 525 customers who didn't view the offer and didn't complete the offer
len(set(bogo_r['customer id'].unique()) - set(bogo_v['customer id'].unique()) - set(bogo_c['customer id'].unique()))
# There are 5,204 customers who viewed the offer information but never completed the offer
len(set(bogo_v['customer id'].unique()) - set(bogo_c['customer id'].unique()))
# There are 459 customers who never viewed the offer information but completed the offer anyway
len(set(bogo_c['customer id'].unique()) - set(bogo_v['customer id'].unique()))
```
For people who completed the BOGO offers (scenario 1 & 2), we need to first figure out whether they viewed the offer or not.
```
bogo_c['offer_viewed'] = 0
viewed_customer_ids = list(bogo_v['customer id'].unique())
idx_to_remove_for_bogo_v = []
for i in range(len(bogo_c)):
customer_id = bogo_c.iloc[i]['customer id']
offer_id = bogo_c.iloc[i]['offer id']
# Check whether the customer ever viewed any offers
if customer_id in viewed_customer_ids:
# Extract all offers viewed by the customer
offer_ids_viewed = bogo_v[bogo_v['customer id'] == customer_id]['offer id'].values.tolist()
# Check whether the customer viewed the offer
if offer_id in offer_ids_viewed:
bogo_c.loc[i, 'offer_viewed'] = 1
idx_to_remove_for_bogo_v.extend(list(bogo_v[(bogo_v['customer id'] == customer_id) & (bogo_v['offer id'] == offer_id)].index))
bogo_c['offer_completed'] = 1
```
For people who viewed the offers, we need to figure out who didn't complete the offer (scenario 3).
```
# Remove duplicated index
idx_to_remove_for_bogo_v = list(set(idx_to_remove_for_bogo_v))
# Drop records that are already counted in bogo_c
bogo_v1 = bogo_v.drop(index=idx_to_remove_for_bogo_v)
# The rest of the records are people who viewed the offer but didn't complete the offer
bogo_v1['offer_viewed'] = 1
bogo_v1['offer_completed'] = 0
# combine bogo_c and bogo_v1
master_df = pd.concat([bogo_c, bogo_v1], axis=0)
# Double check we haven't included any records from scenario 4
(master_df['offer_viewed']+ master_df['offer_completed'] == 0).sum()
```
Finally, we need to know who didn't view and didn't complete the offer (scenario 4)
```
len(bogo_r)
# bogo_r contains all customer ids from master df
set(master_df['customer id']) - set(bogo_r['customer id'])
len(set(bogo_r['customer id']) - set(master_df['customer id']))
idx_to_remove_for_bogo_r = []
customer_ids_in_master_df = list(set(master_df['customer id']))
for i in range(len(bogo_r)):
customer_id = bogo_r.iloc[i]['customer id']
offer_id = bogo_r.iloc[i]['offer id']
# Check whether the customer id is already in master_df
if customer_id in customer_ids_in_master_df:
# Extract all offer ids covered by master_df for this customer
offer_ids_covered = master_df[master_df['customer id'] == customer_id]['offer id'].values.tolist()
# Check whether the offer id is covered by one of the scenarios; if yes, we need to remove the record
if offer_id in offer_ids_covered:
idx_to_remove_for_bogo_r.append(i)
# Drop the scenarios (1,2,3) we already covered in master_df
bogo_r1 = bogo_r.drop(index=idx_to_remove_for_bogo_r)
# The rest of the records are people who didn't view and didn't complete the offer
bogo_r1['offer_viewed'] = 0
bogo_r1['offer_completed'] = 0
# combine master_df and bogo_r1
master_df = pd.concat([master_df, bogo_r1], axis=0)
```
Additional steps to conduct feature engineering and data cleaning.
> - remove unnecessary columns - 'event', 'customer id', 'offer id', 'reward_x', 'amount', 'offer_type'
> - rename column `reward_y` to `reward`
> - extract feature `membership_days` from `became_member_on`; then drop column `became_member_on`
> - convert gender into dummy variables - M, F, and O; then drop column `gender`
> - handle missing values; drop the records with missing values since they are from the same records
```
columns_to_remove = ['event', 'customer id', 'offer id', 'reward_x', 'amount', 'offer_type']
# remove unnecessary columns - 'event', 'customer id', 'offer id', 'reward_x', 'amount', 'offer_type'
master_df = master_df.drop(columns=columns_to_remove)
# rename column reward_y to reward
master_df.rename(columns={'reward_y':'reward'}, inplace=True)
# extract feature membership_days from became_member_on; then drop column became_member_on
master_df['membership_days'] = (datetime.today() - master_df['became_member_on'])
master_df['membership_days'] = master_df['membership_days'].apply(lambda x: x.days)
master_df = master_df.drop(columns=['became_member_on'])
# convert gender into dummy variables - M, F, and O; then drop column gender
dummy_gender = pd.get_dummies(master_df['gender'].apply(pd.Series).stack()).sum(level=0)
master_df = pd.merge(master_df, dummy_gender, left_index=True, right_index=True).drop(columns=['gender'])
# handle missing values; drop the records with missing values since they are from the same records
master_df.isnull().sum()/len(master_df)
master_df.dropna(subset=['age', 'income', 'membership_days'], inplace=True)
master_df.head()
```
Before building the model, conduct a quick exploratory data analysis on the cleaned dataset.
1. Correlation analysis<br>
> - `reward` has a strong positive relationship with `difficulty`
> - `offer_viewed` has a relatively positive relationship with `social`
> - `social` has relatively positive relationships with `difficulty` and `reward` but a relatively negative relationship with `duration`
> - `web` has relatively negative relationships with `difficulty`, `duration`, and `reward`
> - `M` has a negative relationship with `F`
```
corr = master_df.corr()
sns.set(style="white")
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
f, ax = plt.subplots(figsize=(10, 8))
cmap = sns.diverging_palette(220, 15, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap= cmap, center=0,
square=True, linewidths= 0.5, cbar_kws={"shrink": 0.4})
plt.show()
```
2. Income Distribution by whether the Customer Completed the Offer
> Based on the density plot **Income Distribution by whether the Customer Completed the Offer**, we can see that people who completed the BOGO offer tend to have higher income than people who didn't. Most of the people who completed the offer have incomes ranging from \\$70k to \\$80k; on the other hand, people who didn't complete the offer have incomes ranging from \\$30k to \\$45k. Also, there are more people with income higher than \\$80k among those who completed the BOGO offer than among those who didn't.
```
plt.figure(figsize = (12,8))
sns.kdeplot(master_df[master_df['offer_completed'] == 1]['income'], label = 'offer completed')
sns.kdeplot(master_df[master_df['offer_completed'] == 0]['income'], label = 'offer not completed')
plt.xlabel('Income')
plt.ylabel('Density')
plt.title('Income Distribution by whether the Customer Completed the Offer', size=15)
plt.show()
```
3. Customers who completed BOGO vs. customers who didn't complete BOGO
> From the bar chart, we know that there are more people who completed the BOGO offers (\~60%) than people who didn't (\~40%).
```
plt.figure(figsize = (10,6))
sns.countplot(master_df['offer_completed'])
plt.title('Customers completed BOGO vs. Customers who didn\'t complete BOGO', size=15)
plt.show()
```
Shuffle the rows, scale the features, and split the dataset into training and test sets.
```
master_df = master_df.sample(frac=1).reset_index(drop=True)
feature_cols = list(master_df.columns)
feature_cols.remove('offer_completed')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
y = master_df['offer_completed']
X = master_df[feature_cols]
scaler = MinMaxScaler()
X_standardized = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X_standardized, y, test_size=0.2)
```
## 2. Modeling & Metrics
```
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
def evaluation_metrics(preds):
    accuracy = np.mean(np.ravel(preds) == np.array(y_test))  # ravel so predictions and labels compare element-wise
auc = roc_auc_score(np.array(y_test), preds)
f1 = f1_score(np.array(y_test), preds, average = "weighted")
print('Accuracy: {:.3f}'.format(accuracy))
print('Area Under ROC: {:.3f}'.format(auc))
print('F1 Score: {:.3f}'.format(f1))
```
### 2.1. Naive Bayes
```
from sklearn.naive_bayes import BernoulliNB
nb = BernoulliNB()
nb.fit(X_train, y_train)
pred_nb = nb.predict(X_test)
evaluation_metrics(pred_nb)
```
### 2.2 Logistic Regression
```
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred_lr = lr.predict(X_test)
evaluation_metrics(pred_lr)
```
To refine logistic regression, I implemented grid search CV to fine-tune the hyperparameter `C` with 5-fold cross-validation.
```
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
parameters = {'C':[1e-1, 5e-1, 0.8, 1, 1.5, 2, 2.5, 5]}
lr_cv = GridSearchCV(LogisticRegression(), cv = StratifiedKFold(n_splits=5, shuffle = True),
param_grid = parameters, n_jobs = -1)
lr_cv.fit(X_train, y_train)
pred_lr_cv = lr_cv.predict(X_test)
evaluation_metrics(pred_lr_cv)
```
### 2.3 K Nearest Neighbors
```
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
pred_knn = knn.predict(X_test)
evaluation_metrics(pred_knn)
```
To refine KNN, I implemented grid search CV to fine-tune the hyperparameter `n_neighbors` with 5-fold cross-validation.
```
parameters = {'n_neighbors':[3, 4, 5, 6, 7, 8, 9, 10]}
knn_cv = GridSearchCV(KNeighborsClassifier(), cv = StratifiedKFold(n_splits=5, shuffle = True),
param_grid = parameters, n_jobs = -1)
knn_cv.fit(X_train, y_train)
pred_knn_cv = knn_cv.predict(X_test)
evaluation_metrics(pred_knn_cv)
```
### 2.4 Random Forest
```
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
rf.fit(X_train, y_train)
pred_rf = rf.predict(X_test)
# Convert the RF prediction output to binary
pred_rf[pred_rf>=0.5] = 1
pred_rf[pred_rf<0.5] =0
evaluation_metrics(pred_rf)
```
To refine Random Forest, I implemented grid search CV to fine-tune the hyperparameters `n_estimators`, `max_depth`, and `min_samples_leaf` with 5-fold cross-validation.
```
parameters = {'n_estimators':[5,10,15,20], 'max_depth':[5,10,15,20], 'min_samples_leaf':[5,10,15,20]}
rf_cv = GridSearchCV(RandomForestRegressor(), cv = StratifiedKFold(n_splits=5, shuffle = True),
param_grid = parameters, n_jobs = -1)
rf_cv.fit(X_train, y_train)
rf_cv.best_estimator_
pred_rf_cv = rf_cv.predict(X_test)
pred_rf_cv[pred_rf_cv >=0.5] = 1
pred_rf_cv[pred_rf_cv <0.5] =0
evaluation_metrics(pred_rf_cv)
```
### 2.5 Neural Network
```
from keras import models
from keras import layers
nn = models.Sequential()
nn.add(layers.Dense(15, input_dim=15, kernel_initializer='normal', activation='relu'))
nn.add(layers.Dense(10, kernel_initializer='normal', activation='relu'))
nn.add(layers.Dense(5, kernel_initializer='normal', activation='relu'))
nn.add(layers.Dense(1, kernel_initializer='normal', activation='sigmoid'))
nn.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
nn.summary()
nn.fit(X_train, y_train,epochs=100, batch_size= 100)
test_loss, test_acc = nn.evaluate(X_test, y_test)
print('Test accuracy:', test_acc)
pred_nn = nn.predict(X_test)
# Convert the NN prediction output to binary
pred_nn[pred_nn>=0.5] = 1
pred_nn[pred_nn<0.5] =0
evaluation_metrics(pred_nn)
```
## 3. Results & Conclusion
According to the **Metrics** table, we can see that overall Random Forest with CV performs better than the other models, even the neural network. Although the accuracy for Random Forest with CV is not the highest, its AUC and F1 scores are not bad at all.
| Metrics | Accuracy | AUC | F1 |
| :- | -: | -: | -: |
| Naive Bayes | 0.525 | 0.600 | 0.619 |
| Logistic Regression | 0.535 | 0.673 | 0.694 |
| Logistic Regression with CV| 0.536 | 0.672 | 0.693 |
| KNearestNeighbors | 0.520 | 0.763 | 0.772 |
| KNearestNeighbors with CV| 0.519 | 0.765 | 0.773 |
| Random Forest | 0.530 | 0.780 | 0.796 |
| Random Forest with CV| 0.524 | 0.802 | 0.813 |
| Neural Network | 0.516 | 0.788 | 0.793 |
When looking at the confusion matrix, the best model `Random Forest with CV` is doing pretty well in terms of classifying the positive class, meaning people who completed the BOGO offer. However, the model can certainly improve on the prediction for people who didn't complete the BOGO offer.
```
from sklearn.metrics import confusion_matrix
plt.figure(figsize=(8,6))
rf_cv_cm = confusion_matrix(np.array(y_test), pred_rf_cv)
sns.heatmap(rf_cv_cm, cmap='Purples', annot=True, fmt=',.0f', center=1400)
axis_font = {'size':'12'}
plt.ylabel('True label', **axis_font)
plt.xlabel('Predicted label', **axis_font)
plt.show()
```
Since we chose a tree-based model, we can see which feature plays the most important role in predicting whether the customer will complete the BOGO offer. According to the **Feature Importances** chart, `membership_days` turns out to be the most influential feature. The second and third most important features are `time` and `income`. Surprisingly, the feature `offer_viewed` is not even among the top 5 features, which means whether a customer views the BOGO offer or not doesn't significantly impact offer completion.
```
sns.set_style("whitegrid")
importances = rf_cv.best_estimator_.feature_importances_
indices = np.argsort(importances)[::-1]
axis_font = {'size':'15'}
plt.figure(figsize = (10,8))
plt.title('Feature Importances', **axis_font, weight='bold')
plt.barh(range(len(importances)), importances[indices], align='center')
plt.yticks(range(len(importances)), X.columns[indices], **axis_font)
plt.xlabel('Relative Importance', **axis_font)
plt.show()
```
Note that there are more data points in the positive class than in the negative class. Although the class imbalance is not serious (60% vs. 40%), it still might affect model performance. In addition, we don't have a sufficient number of features; the model only has 15 features in total. Therefore, even though we refined the models with cross-validation and hyperparameter tuning, the room for improvement is limited. <br>
In the future, we can gather more information on the offer such as the month when the offer is released, the number of people being targeted for the offer, the type of product in the BOGO offer, etc. Additionally, it'd be beneficial to know more about the customer demographics other than income and age. For example, education level, race, and whether the customer lives in a city are all good indicators to include in the model.
In terms of further improving the models, I will handle the class imbalance issue and conduct more feature engineering to make sure the dataset is representative and generalizable enough. I might also try some other machine learning algorithms such as XGBoost, Support Vector Machines, and other neural network architectures.
# Pooled Cross Sections
* Policy evaluation
* Collected across time, but not necessarily the same individual
# Difference-in-Difference
* Example: the opening of MTR station
* HKU station was open in Dec, 2014
* Control group: HKUST
1. Analysis of the grouped average
2. Regression analysis
$$y_{it} = \beta_1 + \beta_2 d_t + \beta_3 d_{hku} + \beta_4 \, d_t \times d_{hku} + e_{it},$$
where $d_t$ indicates the post-opening period and $d_{hku}$ indicates the HKU (treated) group
* Variables indexed by $i$ and $t$
* Convenience of the latter approach: statistical inference and covariate control
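To make the regression approach concrete, here is a minimal sketch in R on simulated data; the variable names (`d_post`, `d_hku`) and all numbers are illustrative assumptions, not estimates from any real data:
```
# Minimal difference-in-difference sketch on simulated data (illustrative only).
set.seed(42)
n <- 400
d_hku  <- rbinom(n, 1, 0.5)   # 1 = HKU (treated group), 0 = HKUST (control group)
d_post <- rbinom(n, 1, 0.5)   # 1 = period after the station opened
y <- 1 + 0.5 * d_post + 0.3 * d_hku + 0.8 * d_post * d_hku + rnorm(n)

# the coefficient on d_post:d_hku is the difference-in-difference estimate
summary(lm(y ~ d_post * d_hku))
```
In practice one would add covariates on the right-hand side and use robust or clustered standard errors, which is exactly the convenience of the regression approach mentioned above.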
# Panel Data
* Economists mostly work with observational data.
* The data generation process is out of the researchers' control.
* Difficult to control heterogeneity among the individuals in cross-sectional data.
* panel data offers a chance
* Panel data track the same individuals across time $t=1,\ldots,T$.
* assume the observations are independent across $i=1,\ldots,n$
* allow dependence within each $i$ across $t=1,\ldots,T$
# Linear Equation
$$y_{it}=\beta_{1}+x_{it}\beta_{2}+u_{it},\ i=1,\ldots,n;t=1,\ldots,T\label{eq:basic_eq}$$
* $u_{it}=\alpha_{i}+\epsilon_{it}$ is called the **composite error**.
* $\alpha_{i}$ is the time-invariant unobserved heterogeneity.
* $\epsilon_{it}$ varies across individuals and time periods.
# Motivating Example
* Air pollution at city level
$$\mathrm{PM2.5}_{it}= \alpha_i + \beta_1 \mathrm{GdpGrowth}_{it} + \beta_2' \mathrm{OtherControls}_{it} + e_{it}$$
* $\alpha_i$ is used to control the geographic composition
# Real Data Example
* a dataset from [NBER-CES Manufacturing Industry Database](http://www.nber.org/nberces/).
* contains annual information of 473 USA industries during 1958 to 2009.
```
g0 <- read.csv("naics5809.csv")
g0[c(1:10, 50:60), 1:10]
```
# R package
`plm`: panel data
```
library(plm)
g <- pdata.frame( g0, index = c("naics", "year") )
# the regression equation
equation <- emp~invest+cap
# Nothing prevents us from running an OLS.
g.ols <- lm(equation, data=g)
summary(g.ols)
# The OLS coefficient estimates are exactly the same as the pooled OLS.
# The only difference in the summary is that the latter shows the panel structure
# of the data.
g.pool <- plm(equation,data=g,model="pooling")
summary(g.pool)
```
# Panel Data Models
* Fixed effect
* Random effect
* Estimation: OLS
# Fixed Effect
* FE model allows $\alpha_{i}$ and $x_{it}$ to be arbitrarily
correlated.
* Need to eliminate $\alpha_{i},i=1,\ldots,n$ to restore consistency.
Averaging the $T$ equations for the same $i$,
$$\overline{y}_{i}=\beta_{1}+\overline{x}_{i}\beta_{2}+\bar{u}_{i}=\beta_{1}+\overline{x}_{i}\beta_{2}+\alpha_{i}+\bar{\epsilon}_{i}.\label{eq:group_mean}$$
where $\overline{y}_{i}=\frac{1}{T}\sum_{t=1}^{T}y_{it}$.
Subtracting
the average,
$$\tilde{y}_{it}=\tilde{x}_{it}\beta_{2}+\tilde{\epsilon}_{it}$$
where $\tilde{y}_{it}=y_{it}-\overline{y}_{i}$.
Run OLS with the
demeaned data, and obtain the within estimator
$$\widehat{\beta}_{2}^{FE}=\left(\tilde{X}'\tilde{X}\right)^{-1}\tilde{X}'\tilde{y},$$
where $\tilde{y}=\left(y_{it}\right)_{i,t}$ stacks all the $nT$
observations into a vector, and similarly defined is $\tilde{X}$ as an
$nT\times K$ matrix, where $K$ is the dimension of $\beta_{2}$.
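As a minimal sketch of the within transformation on simulated data (not the NAICS panel used below; all names and numbers are illustrative), the demeaning and the OLS on demeaned data can be done directly in base R:
```
# Within (fixed-effect) estimator by hand on a simulated balanced panel.
set.seed(1)
n <- 50; periods <- 5
id <- rep(1:n, each = periods)
x  <- rnorm(n * periods)
alpha <- rep(rnorm(n), each = periods)        # individual effects
y  <- 1 + 2 * x + alpha + rnorm(n * periods)  # true beta_2 = 2

demean <- function(v, g) v - ave(v, g)        # subtract individual means
y_til <- demean(y, id)
x_til <- demean(x, id)

# OLS on the demeaned data recovers beta_2
sum(x_til * y_til) / sum(x_til^2)
```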
# Assumptions
**Assumption FE.1**
$E\left[\epsilon_{it}|\alpha_{i},\mathbf{x}_{i}\right]=0$ where
$\mathbf{x}_{i}=\left(x_{i1},\ldots,x_{iT}\right)$. (*strict exogeneity*)
* The error $\epsilon_{it}$ is mean
independent of the past, present and future explanatory variables.
# Consistency
* Asymptotic framework: $n\to\infty$ while $T$ stays fixed.
* appropriate for panel datasets with many individuals but only a few time periods.
**Proposition** If FE.1 is satisfied, then $\widehat{\beta}_{2}^{FE}$ is consistent.
# Asymptotic Normality
**Assumption FE.2**
$\mathrm{var}\left(\epsilon_{i}|\alpha_{i},\mathbf{x}_{i}\right)=\sigma_{\epsilon}^{2}I_{T}$.
* Under FE.1 and FE.2,
$\widehat{\sigma}_{\epsilon}^{2}=\frac{1}{n\left(T-1\right)}\sum_{i=1}^{n}\sum_{t=1}^{T}\widehat{\tilde{\epsilon}}_{it}^{2}$
is a consistent estimator of $\sigma_{\epsilon}^{2}$.
If FE.1 and FE.2 are satisfied, then
$$\left(\widehat{\sigma}_{\epsilon}^{2}\left(\tilde{X}'\tilde{X}\right)^{-1}\right)^{-1/2}\left(\widehat{\beta}_{2}^{FE}-\beta_{2}^{0}\right)\stackrel{d}{\to} N\left(0,I_{K}\right).$$
# Limitation
* FE eliminates all time-invariant explanatory variables, including the intercept.
* From FE we cannot obtain the coefficient estimates of these time-invariant variables.
**Data Example** In reality we do not need to compute the estimator or the variance by hand. `R` handles them automatically.
```
g.fe <- plm(equation, data=g, model="within")
# statisticians call the FE estimator 'within' estimator as it carries out
# a within-group transformation
summary(g.fe)
```
# Publication Example
* Lin, Justin Yifu (1992): [Rural Reforms and Agricultural
Growth in China](http://www.jstor.org/stable/2117601), *The American
Economic Review*, Vol.82, No.1, pp.34-51.
* 40th anniversary of China's Reform and Opening-up
* The Chinese agricultural industry witnessed dramatic growth
during 1978-1984.
* Was the growth attributable to the household-responsibility system (HRS) reform?
* Lin (1992): panel data of 28 mainland provinces from 1970 to 1987.
* He estimates the following FE model by OLS.
$$
\begin{aligned}
\ln Y_{it} & = \alpha_1 + \alpha_2 \ln(\mathrm{Land}_{it}) + \alpha_3 \ln (\mathrm{Labor}_{it}) \\
& + \alpha_4 \ln (\mathrm{Capital}_{it}) + \alpha_5 \ln (\mathrm{Fert}_{it}) + \alpha_6 \mathrm{HRS}_{it} \\
& + \alpha_7 \mathrm{MP}_{t-1} + \alpha_8 \mathrm{GP}_t + \alpha_9 \mathrm{NGCA}_{it} + \alpha_{10} \mathrm{MCI}_{it} + \alpha_{11} T_t + \sum_{j=12}^{39} \alpha_{j} D_j + \epsilon_{it}.
\end{aligned}$$
* The empirical findings are robust
* The importance of HRS is supported across specifications
## Random Effect
* RE allows time-invariant explanatory variables.
* Knife-edge special case $\mathrm{cov}\left(\alpha_{i},x_{it}\right)=0$.
* FE is consistent when $\alpha_{i}$ and $x_{it}$ are uncorrelated.
* OLS is also consistent.
* But neither is efficient.
# Assumptions
**Assumption RE.1**
$E\left[\epsilon_{it}|\alpha_{i},\mathbf{x}_{i}\right]=0$ and
$E\left[\alpha_{i}|\mathbf{x}_{i}\right]=0$.
RE.1 obviously implies $\mathrm{cov}\left(\alpha_{i},x_{it}\right)=0$,
so
$$S=\mathrm{var}\left(u_{i}|\mathbf{x}_{i}\right)=\sigma_{\alpha}^{2}\mathbf{1}_{T}\mathbf{1}_{T}'+\sigma_{\epsilon}^{2}I_{T},\ \mbox{for all }i=1,\ldots,n.$$
* The covariance matrix is not a scalar multiplication of the
identity matrix.
* OLS is inefficient.
# Estimation
* Rewrite $$y_{it}=w_{it}\boldsymbol{\beta}+u_{it}$$
* Had we known $S$, the GLS estimator would be
$$\widehat{\boldsymbol{\beta}}^{RE}=\left(\sum_{i=1}^{n}\mathbf{w}_{i}'S^{-1}\mathbf{w}_{i}\right)^{-1}\sum_{i=1}^{n}\mathbf{w}_{i}'S^{-1}\mathbf{y}_{i}=\left(W'\mathbf{S}^{-1}W\right)^{-1}W'\mathbf{S}^{-1}y$$
* In practice, software computes FGLS
```
g.re <- plm(equation, data=g, model="random")
summary(g.re)
# Which model is preferred?
# The Hausman test favors the fixed-effect model.
phtest(g.re, g.fe)
```
# Dynamic Panel Model
* Example: Stock price is influenced by the fundamental indicators in the quarterly financial report, but also by yesterday's price.
$$y_{it}=\beta_{1}+\beta_{2}y_{it-1}+\beta_{3}x_{it}+\alpha_{i}+\epsilon_{it}$$
First-difference (FD): for periods $t$ and
$t-1$,
$$
\left(y_{it}-y_{it-1}\right)=\beta_{2}\left(y_{it-1}-y_{it-2}\right)+\beta_{3}\left(x_{it}-x_{it-1}\right)+\left(\epsilon_{it}-\epsilon_{it-1}\right).
$$
For simplicity, assume $E\left[\left(x_{it}-x_{it-1}\right)\left(\epsilon_{it}-\epsilon_{it-1}\right)\right]=0$,
but
$$
E\left[\left(y_{it-1}-y_{it-2}\right)\left(\epsilon_{it}-\epsilon_{it-1}\right)\right]
=-E\left[y_{it-1}\epsilon_{it-1}\right]=-E\left[\epsilon_{it-1}^{2}\right]\neq0.
$$
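So the lagged dependent variable is endogenous after first-differencing and OLS on the FD equation is inconsistent; a standard remedy is to instrument $\left(y_{it-1}-y_{it-2}\right)$ with deeper lags of $y$. As a sketch, the `pgmm()` function in `plm` implements this Arellano-Bond type GMM estimator. The example below uses the `EmplUK` data bundled with `plm` (not the NAICS data above), and the exact specification is only illustrative:
```
library(plm)
data("EmplUK", package = "plm")
# Difference GMM: the lagged dependent variable is instrumented by its lags 2 and deeper
ab <- pgmm(log(emp) ~ lag(log(emp), 1) + lag(log(wage), 0:1) + log(capital) |
             lag(log(emp), 2:99),
           data = EmplUK, effect = "twoways", model = "twosteps")
summary(ab)
```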
Quiz: Multi Layer Perceptron
==================
In this quiz you are asked to modify the MLP model used in the tutorial in order to train the network on the [Iris flower dataset](../iris/iris.ipynb). The dataset is already provided in the TFRecord format and you can find it [here](../iris). In the snippets below you will find a **hashtag** `#QUIZ` in the parts where you are asked to complete the code. Remember that the Tensorflow official documentation is your best friend when you cannot find a specific method.
Defining the model
---------------------
Here you have to define the MLP model for the new dataset. What you are asked to do is:
- Define the model. The dataset has **4 input features** (sepal length, sepal width, petal length, petal width) meaning that your MLP must have 4 input units. Moreover, the dataset has **3 classes** (0=Setosa, 1=Versicolor, 2=Virginica) meaning that your perceptron must have 3 output units.
- Design a new **accuracy metric**. In the XOR example there were only two possible classes, whereas here there are three. You should use the `argmax()` method in tensorflow to get the class with the highest probability returned from the MLP. Then you must compare it with the actual target class that is stored in a one-hot vector.
```
import tensorflow as tf
def my_model_fn(features, labels, mode):
#QUIZ: here you must define the model for the new dataset
#PREDICT mode
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"classes": tf.round(y),
"probabilities": y}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#TRAIN mode
elif mode == tf.estimator.ModeKeys.TRAIN:
loss = tf.losses.mean_squared_error(labels=labels, predictions=y)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
#QUIZ: here you must define a new accuracy metric.
#accuracy = tf.metrics.accuracy(???)
tf.summary.scalar('accuracy', accuracy[1]) #<-- accuracy[1] to grab the value
logging_hook = tf.train.LoggingTensorHook({"accuracy" : accuracy[1]}, every_n_iter=250)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, training_hooks =[logging_hook])
#EVAL mode
elif mode == tf.estimator.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(labels=labels, predictions=y)
#QUIZ: here you must define a new accuracy metric.
#accuracy = tf.metrics.accuracy(???)
eval_metric = {"accuracy": accuracy}
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric)
mlp = tf.estimator.Estimator(model_fn=my_model_fn, model_dir="./tf_mlp_model")
```
Train the model
------------------
In this section you have to define a dataset object and an iterator. The parse function is already implemented. The TFRecord files for the Iris dataset are ready to be used and included in this repository. Here you have to declare a Tensorflow dataset and an iterator. I suggest you read the official Tensorflow documentation to learn more about those classes.
```
def my_input_fn():
def _parse_function(example_proto):
features = {"feature": tf.VarLenFeature(tf.float32),
"label": tf.FixedLenFeature((), tf.int64, default_value=0)}
parsed_features = tf.parse_single_example(example_proto, features)
feature = tf.cast(parsed_features["feature"], tf.float32)
feature = tf.sparse_tensor_to_dense(feature, default_value=0)
label_one_hot = tf.one_hot(parsed_features["label"], depth=3)
return feature, label_one_hot
#QUIZ: load the TFRecord dataset using the method tf.data.TFRecordDataset()
#QUIZ: parse the dataset using the map() method pointing to the _parse_function() above
#QUIZ: store the dataset in cache
    #QUIZ: shuffle the dataset using the shuffle() method (be careful with the buffer_size parameter)
#QUIZ: use the method repeat() to decide how many time you would like to use the dataset
#QUIZ: define the batch size through the batch() method
#QUIZ: make a one-shot iterator
#QUIZ: get new values using get_next()
return batch_features, batch_labels
tf.logging.set_verbosity(tf.logging.INFO)
mlp.train(input_fn=my_input_fn, steps=5000)
```
Test the model
------------------
Now you have to test the model on the test set. You have to declare a new input function, a dataset and an iterator. You can reuse the code above but you must be careful with the `batch()` and `repeat()` methods.
```
def my_input_fn():
#QUIZ: load and parse the test set. This part is very similar to the function
#you already implemented above.
return batch_features, batch_labels
```
Improve the performances
-----------------------------
Now you have to modify some critical parameters in your model in order to improve the performances. I suggest you play with the number of hidden units. You can try to increase the number of units and use **dropout** to avoid overfitting. Moreover, you can use a different optimizer. Good optimizers are RMSProp, Adam, and Adagrad. However, sometimes nothing is better than a well-tuned stochastic gradient descent with a decaying learning rate. Good luck...
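As a small sketch (not the quiz solution), the kind of change suggested above might look like the snippet below inside a TF1-style `model_fn`; the layer sizes, dropout rate, and optimizer settings are illustrative assumptions:
```
import tensorflow as tf

def hidden_block(x, mode):
    # x: float32 tensor of shape [batch, 4] (the four Iris features)
    # mode: a tf.estimator.ModeKeys value, so dropout is only active while training
    h = tf.layers.dense(x, units=64, activation=tf.nn.relu)
    h = tf.layers.dropout(h, rate=0.3, training=(mode == tf.estimator.ModeKeys.TRAIN))
    h = tf.layers.dense(h, units=32, activation=tf.nn.relu)
    return tf.layers.dense(h, units=3, activation=tf.nn.softmax)  # 3 Iris classes

# Inside my_model_fn you could then write, for example:
#   y = hidden_block(features, mode)
#   optimizer = tf.train.RMSPropOptimizer(learning_rate=0.005)
```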
# Overview
The aim here is to take two sources of information
and combine them into a new source, so that the result is more accurate.
We define our data using:
* a function.
For example: $x$ has values $x(t), x(t+1)...$
The function can be written as:
$x(t+1) = F(t)x(t) + G(t)u(t)$
where $F,G$ are linear transformations, and $u$ models noise.
* a Normal distribution: $N(\mu, covariance)$
If we have a basic prediction model and a separate measurements source,
we can combine the measurements source into the prediction model to enrich it.
The measurements source can be modeled using:
$z(t) = H(t)x(t)$, where $H$ describes how $x$ relates to $z$ for any value of $x$
# Predict & Enrich
We "predict" or calculate the next value for $x$ using:
$x_B = x(t+1) = F(t)x(t) + G(t)u(t)$
$P_B = P(t+1) = F(t)P(t)F(t)^T + Q(t)$
$P$ is the covariance for $x$ and $Q$ is the covariance for the noise $u$.
$F$ and $G$ define linear transformation matrices.
Then we "enrich" by combining the measurements:
$x_A = x_B + K_G(z - Hx_B)$
$P_A = P_B - K_GHP_B$
$K_G = P_BH^T(HP_BH^T + R)^{-1}$
The above equations are derived by first expressing $x$ in terms of $z$,
combining $N_z(Hx, HPH^T)$ and $N_z(z, R)$, and then rearranging the equations in terms of $x$ again (see Appendix B).
The result is a generalised way of taking a multivariate model and then enriching it with another
multivariate model. Each can be either a theoretical calculation or a real observation. The only requirements are that
we have a way to move from model $x$ to model $z$, and that both are normal distributions.
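A minimal NumPy sketch of one predict/enrich step is shown below, folding the noise input $G(t)u(t)$ into the covariance $Q$ and using arbitrary example numbers for a position/velocity state observed through its position only:
```
import numpy as np

def predict(x, P, F, Q):
    # x_B = F x,  P_B = F P F^T + Q
    x_B = F @ x
    P_B = F @ P @ F.T + Q
    return x_B, P_B

def enrich(x_B, P_B, z, H, R):
    # K_G = P_B H^T (H P_B H^T + R)^-1
    K = P_B @ H.T @ np.linalg.inv(H @ P_B @ H.T + R)
    x_A = x_B + K @ (z - H @ x_B)
    P_A = P_B - K @ H @ P_B
    return x_A, P_A

# One step: 2-D state (position, velocity), measurement of position only
F = np.array([[1., 1.], [0., 1.]]); Q = 0.01 * np.eye(2)
H = np.array([[1., 0.]]);           R = np.array([[0.25]])
x, P = np.array([0., 1.]), np.eye(2)
x_B, P_B = predict(x, P, F, Q)
x_A, P_A = enrich(x_B, P_B, z=np.array([1.2]), H=H, R=R)
```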
# Appendix
## A. Applying a linear transformation to a covariance matrix
To apply a linear transformation $F$ to the covariance matrix $P$, compute $FPF^T$.
## B. Enriching a model with a separate source of information
We have our main model as:
$x_B = x(t+1) = F(t)x(t) + G(t)u(t)$ and $N(x, P)$,
our new source of information is $z$ and $x$ relates to $z$ using $z=H(t)x$.
$z$ has a distribution modeled by $N(z, R)$ where $R$ is $z$'s covariance matrix.
To combine the two normal distributions, i.e. for $x$ and for $z$, we first state $N(x, P)$
in terms of $z$ as: $N( Hx, HPH^T )$
Then we combine the two distributions by operating on their mean $Hx$ and $z$ vectors and
on their covariance matrices $HPH^T$ and $R$ as:
$N_z(z, R) \times N_z(Hx, HPH^T) = N_z(x_{zA}, P_{zA})$, where
$x_{zA} = Hx + K(z - Hx)$,
$P_{zA} = HPH^T - KHPH^T$,
and $K = HPH^T(HPH^T + R)^{-1}$.
Now we have the enriched distribution, but we need it in terms of $N_x$ and not $N_z$,
to do this we have to rearrange the equations for $x_{zA}$ and $P_{zA}$ in terms of $x$.
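As a quick sanity check of these formulas, the 1-D example below (with $H = 1$) compares the combined mean from the equations above with the familiar inverse-variance-weighted average of two Gaussian estimates; the numbers are arbitrary:
```
# 1-D case with H = 1: combine N(x, P) with a measurement N(z, R)
x, P = 2.0, 4.0   # prior mean and variance
z, R = 3.0, 1.0   # measurement and its variance

K = P / (P + R)            # K = H P H^T (H P H^T + R)^-1 with H = 1
x_A = x + K * (z - x)      # combined mean
P_A = P - K * P            # combined variance

# Same mean via inverse-variance weighting of the two estimates
x_ivw = (x / P + z / R) / (1 / P + 1 / R)
print(x_A, x_ivw)          # both are 2.8; P_A is 0.8
```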
```
!pip install -qq transformers
!pip install -qq optuna
!pip install -qq sentencepiece
!pip install -qq datasets
!pip install -qq stabilizer
import transformers
import datasets
from transformers import AutoTokenizer, AutoModelForSequenceClassification,AdamW, get_linear_schedule_with_warmup,Trainer, TrainingArguments
from transformers.file_utils import is_tf_available, is_torch_available, is_torch_tpu_available
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
import random
from textwrap import wrap
from datetime import datetime
from datasets import load_from_disk
from datasets import Dataset
from sklearn.metrics import accuracy_score,classification_report, confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from stabilizer.reinitialize import reinit_autoencoder_model
from stabilizer.llrd import get_optimizer_parameters_with_llrd
from torch import nn
# the model we are going to train: DistilBERT base uncased
# check text classification models here: https://huggingface.co/models?filter=text-classification
MODEL_NAME = "distilbert-base-uncased"
BATCH_SIZE = 16
EPOCHS = 3
LEARNING_RATE= 6.58e-5
WEIGHT_DECAY = 0.289
WARMUP_STEPS = 464
RANDOM_SEED=22
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
def set_seed(seed):
"""Set all seeds to make results reproducible (deterministic mode).
When seed is None, disables deterministic mode.
:param seed: an integer of your choosing
"""
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
random.seed(seed)
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
# calculate accuracy using sklearn's function
acc = accuracy_score(labels, preds)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='macro')
# per-class metrics via sklearn's classification_report (an output dict, despite the variable name below)
confusion_matrix = classification_report(labels, preds, digits=4, output_dict=True)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall,
'hate_f1': confusion_matrix["0"]["f1-score"],
'hate_recall': confusion_matrix["0"]["recall"],
'hate_precision': confusion_matrix["0"]["precision"],
'offensive_f1': confusion_matrix["1"]["f1-score"],
'offensive_recall': confusion_matrix["1"]["recall"],
'offensive_precision': confusion_matrix["1"]["precision"],
'normal_f1': confusion_matrix["2"]["f1-score"],
'normal_recall': confusion_matrix["2"]["recall"],
'normal_precision': confusion_matrix["2"]["precision"],
}
def model_init():
return AutoModelForSequenceClassification.from_pretrained(MODEL_NAME,num_labels=3).to(device)
# Layer re-initialization helper for DistilBERT, adapted from the stabilizer implementation:
#https://github.com/flowerpot-ai/stabilizer
def reinit_autoencoder_model(model, reinit_num_layers=0):
"""reinitialize autoencoder model layers"""
if reinit_num_layers:
for layer in model.distilbert.transformer.layer[-reinit_num_layers:]:
for module in layer.modules():
if isinstance(module, nn.Embedding):
if module.weight.requires_grad:
module.weight.data.normal_(mean=0.0, std=model.config.initializer_range)
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=model.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
return model
def timestamp():
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("%d-%b-%Y (%H:%M:%S.%f)")
print(timestampStr)
set_seed(RANDOM_SEED)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
hatetwit_dataset_dfs = load_from_disk('/content/drive/MyDrive/Dissertation/datasets/hatetwit_'+str(1))
train_dataset = hatetwit_dataset_dfs ["train"].remove_columns(["input_ids_bert","attention_mask_bert","token_type_ids_bert"])
eval_dataset = hatetwit_dataset_dfs ["validation"].remove_columns(["input_ids_bert","attention_mask_bert","token_type_ids_bert"])
test_dataset = hatetwit_dataset_dfs ["test"].remove_columns(["input_ids_bert","attention_mask_bert","token_type_ids_bert"])
training_args = TrainingArguments(
output_dir='/content/drive/MyDrive/Dissertation/disbert_hate_reinit/results', # output directory
num_train_epochs=EPOCHS, # total number of training epochs
save_strategy ="epoch" ,
per_device_train_batch_size=BATCH_SIZE, # batch size per device during training
per_device_eval_batch_size=BATCH_SIZE, # batch size for evaluation
warmup_steps = WARMUP_STEPS,
weight_decay= WEIGHT_DECAY, # strength of weight decay
learning_rate= LEARNING_RATE,
logging_dir='./disbert_hate/logs', # directory for storing logs
load_best_model_at_end=True, # load the best model when finished training (default metric is loss)
evaluation_strategy="epoch",
#eval_steps = 500 # evaluate each `logging_steps`
)
model_l0 = model_init()
model_l0 = reinit_autoencoder_model(model_l0,0)
trainer_l0 = Trainer(
model =model_l0, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l0.train()
timestamp()
trainer_l0.evaluate(test_dataset)
timestamp()
model_l1 = model_init()
model_l1 = reinit_autoencoder_model(model_l1,1)
trainer_l1 = Trainer(
model =model_l1, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l1.train()
timestamp()
trainer_l1.evaluate(test_dataset)
timestamp()
model_l2 = model_init()
model_l2 = reinit_autoencoder_model(model_l2,2)
trainer_l2 = Trainer(
model =model_l2, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l2.train()
timestamp()
trainer_l2.evaluate(test_dataset)
timestamp()
model_l3 = model_init()
model_l3 = reinit_autoencoder_model(model_l3,3)
trainer_l3 = Trainer(
model =model_l3, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l3.train()
timestamp()
trainer_l3.evaluate(test_dataset)
timestamp()
model_l4 = model_init()
model_l4 = reinit_autoencoder_model(model_l4,4)
trainer_l4 = Trainer(
model =model_l4, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l4.train()
trainer_l4.evaluate(test_dataset)
model_l5 = model_init()
model_l5 = reinit_autoencoder_model(model_l5,5)
trainer_l5 = Trainer(
model =model_l5, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l5.train()
trainer_l5.evaluate(test_dataset)
model_l6 = model_init()
model_l6 = reinit_autoencoder_model(model_l6,6)
trainer_l6 = Trainer(
model =model_l6, # the instantiated Transformers model to be trained
args=training_args, # training arguments, defined above
train_dataset=train_dataset, # training dataset
eval_dataset=eval_dataset, # evaluation dataset
compute_metrics=compute_metrics, # the callback that computes metrics of interest
)
trainer_l6.train()
trainer_l6.evaluate(test_dataset)
```
# Lab 3
## Model description
The data contain 830 records and 6 attributes. The dataset contains information used for breast cancer screening.
### Attributes
- BI-RADS assessment: 1 to 5
- Age: patient's age in years
- Shape: mass shape: round=1 oval=2 lobular=3 irregular=4
- Margin: mass margin: circumscribed=1 microlobulated=2 obscured=3 ill-defined=4 spiculated=5
- Density: mass density high=1 iso=2 low=3 fat-containing=4
- Severity: benign=0 or malignant=1
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv("cleaned_data.csv")
data.head()
min_ = data.min()
max_ = data.max()
mean_ = data.mean()
std_ = data.std()
nulls = data.isnull().sum()
stats = pd.DataFrame({'Missing': nulls, 'Min': min_, 'Max': max_, 'Mean': mean_, 'Std': std_})
stats
# Split the data into training and test sets
from sklearn.model_selection import train_test_split
x, y = data.iloc[:, 0:5].values, data.iloc[:, 5].values
X_train, X_test, y_train, y_test = \
train_test_split(x, y, test_size=0.3, random_state=0)
from sklearn.preprocessing import StandardScaler
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
from sklearn.linear_model import Perceptron
ppn = Perceptron(eta0=0.1, random_state=0,max_iter=40)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
print('Number of misclassified samples: %d' % (y_test != y_pred).sum())
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
print('Precision (benign): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Precision (malignant): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Recall (benign): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Recall (malignant): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Accuracy: %.3f' % accuracy_score(y_true=y_test, y_pred=y_pred))
from sklearn.tree import DecisionTreeClassifier
import mlxtend
from mlxtend.plotting import plot_decision_regions
tree = DecisionTreeClassifier (criterion='entropy', max_depth=3, random_state=0)
tree.fit(X_train_std, y_train)
y_pred = tree.predict(X_test_std)
print('Number of misclassified samples: %d' % (y_test != y_pred).sum())
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
print('Precision (benign): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Precision (malignant): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Recall (benign): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Recall (malignant): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Accuracy: %.3f' % accuracy_score(y_true=y_test, y_pred=y_pred))
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=6, p=2, metric='minkowski')
knn.fit(X_train_std, y_train)
y_pred = knn.predict(X_test_std)
print('Number of misclassified samples: %d' % (y_test != y_pred).sum())
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
print('Precision (benign): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Precision (malignant): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Recall (benign): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Recall (malignant): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Accuracy: %.3f' % accuracy_score(y_true=y_test, y_pred=y_pred))
from sklearn.svm import SVC
svm =SVC(kernel='rbf', random_state=0, gamma=40, C=1.0)
svm.fit(X_train_std, y_train)
y_pred = svm.predict(X_test_std)
print('Number of misclassified samples: %d' % (y_test != y_pred).sum())
from sklearn.metrics import confusion_matrix
confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
print(confmat)
print('Precision (benign): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Precision (malignant): %.3f' % precision_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Recall (benign): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=0))
print('Recall (malignant): %.3f' % recall_score(y_true=y_test, y_pred=y_pred, pos_label=1))
print('Accuracy: %.3f' % accuracy_score(y_true=y_test, y_pred=y_pred))
```
# The Autodiff Cookbook
*alexbw@, mattjj@*
JAX has a pretty general automatic differentiation system. In this notebook, we'll go through a whole bunch of neat autodiff ideas that you can cherry pick for your own work, starting with the basics.
```
import jax.numpy as np
from jax import grad, jit, vmap
from jax import random
key = random.PRNGKey(0)
```
## Gradients
### Starting with `grad`
You can differentiate a function with `grad`:
```
grad_tanh = grad(np.tanh)
print(grad_tanh(2.0))
```
`grad` takes a function and returns a function. If you have a Python function `f` that evaluates the mathematical function $f$, then `grad(f)` is a Python function that evaluates the mathematical function $\nabla f$. That means `grad(f)(x)` represents the value $\nabla f(x)$.
Since `grad` operates on functions, you can apply it to its own output to differentiate as many times as you like:
```
print(grad(grad(np.tanh))(2.0))
print(grad(grad(grad(np.tanh)))(2.0))
```
Let's look at computing gradients with `grad` in a linear logistic regression model. First, the setup:
```
def sigmoid(x):
return 0.5 * (np.tanh(x / 2) + 1)
# Outputs probability of a label being true.
def predict(W, b, inputs):
return sigmoid(np.dot(inputs, W) + b)
# Build a toy dataset.
inputs = np.array([[0.52, 1.12, 0.77],
[0.88, -1.08, 0.15],
[0.52, 0.06, -1.30],
[0.74, -2.49, 1.39]])
targets = np.array([True, True, False, True])
# Training loss is the negative log-likelihood of the training examples.
def loss(W, b):
preds = predict(W, b, inputs)
label_probs = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(np.log(label_probs))
# Initialize random model coefficients
key, W_key, b_key = random.split(key, 3)
W = random.normal(W_key, (3,))
b = random.normal(b_key, ())
```
Use the `grad` function with its `argnums` argument to differentiate a function with respect to positional arguments.
```
# Differentiate `loss` with respect to the first positional argument:
W_grad = grad(loss, argnums=0)(W, b)
print('W_grad', W_grad)
# Since argnums=0 is the default, this does the same thing:
W_grad = grad(loss)(W, b)
print('W_grad', W_grad)
# But we can choose different values too, and drop the keyword:
b_grad = grad(loss, 1)(W, b)
print('b_grad', b_grad)
# Including tuple values
W_grad, b_grad = grad(loss, (0, 1))(W, b)
print('W_grad', W_grad)
print('b_grad', b_grad)
```
This `grad` API has a direct correspondence to the excellent notation in Spivak's classic *Calculus on Manifolds* (1965), also used in Sussman and Wisdom's [*Structure and Interpretation of Classical Mechanics*](http://mitpress.mit.edu/sites/default/files/titles/content/sicm_edition_2/book.html) (2015) and their [*Functional Differential Geometry*](https://mitpress.mit.edu/books/functional-differential-geometry) (2013). Both books are open-access. See in particular the "Prologue" section of *Functional Differential Geometry* for a defense of this notation.
Essentially, when using the `argnums` argument, if `f` is a Python function for evaluating the mathematical function $f$, then the Python expression `grad(f, i)` evaluates to a Python function for evaluating $\partial_i f$.
### Differentiating with respect to nested lists, tuples, and dicts
Differentiating with respect to standard Python containers just works, so use tuples, lists, and dicts (and arbitrary nesting) however you like.
```
def loss2(params_dict):
preds = predict(params_dict['W'], params_dict['b'], inputs)
label_probs = preds * targets + (1 - preds) * (1 - targets)
return -np.sum(np.log(label_probs))
print(grad(loss2)({'W': W, 'b': b}))
```
You can [register your own container types](https://github.com/google/jax/issues/446#issuecomment-467105048) to work with not just `grad` but all the JAX transformations (`jit`, `vmap`, etc.).
### Evaluate a function and its gradient using `value_and_grad`
Another convenient function is `value_and_grad` for efficiently computing both a function's value as well as its gradient's value:
```
from jax import value_and_grad
loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)
print('loss value', loss_value)
print('loss value', loss(W, b))
```
### Checking against numerical differences
A great thing about derivatives is that they're straightforward to check with finite differences:
```
# Set a step size for finite differences calculations
eps = 1e-4
# Check b_grad with scalar finite differences
b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps
print('b_grad_numerical', b_grad_numerical)
print('b_grad_autodiff', grad(loss, 1)(W, b))
# Check W_grad with finite differences in a random direction
key, subkey = random.split(key)
vec = random.normal(subkey, W.shape)
unitvec = vec / np.sqrt(np.vdot(vec, vec))
W_grad_numerical = (loss(W + eps / 2. * unitvec, b) - loss(W - eps / 2. * unitvec, b)) / eps
print('W_dirderiv_numerical', W_grad_numerical)
print('W_dirderiv_autodiff', np.vdot(grad(loss)(W, b), unitvec))
```
JAX provides a simple convenience function that does essentially the same thing, but checks up to any order of differentiation that you like:
```
from jax.test_util import check_grads
check_grads(loss, (W, b), order=2) # check up to 2nd order derivatives
```
### Hessian-vector products with `grad`-of-`grad`
One thing we can do with higher-order `grad` is build a Hessian-vector product function. (Later on we'll write an even more efficient implementation that mixes both forward- and reverse-mode, but this one will use pure reverse-mode.)
A Hessian-vector product function can be useful in a [truncated Newton Conjugate-Gradient algorithm](https://en.wikipedia.org/wiki/Truncated_Newton_method) for minimizing smooth convex functions, or for studying the curvature of neural network training objectives (e.g. [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).
For a scalar-valued function $f : \mathbb{R}^n \to \mathbb{R}$, the Hessian at a point $x \in \mathbb{R}^n$ is written as $\partial^2 f(x)$. A Hessian-vector product function is then able to evaluate
$\qquad v \mapsto \partial^2 f(x) \cdot v$
for any $v \in \mathbb{R}^n$.
The trick is not to instantiate the full Hessian matrix: if $n$ is large, perhaps in the millions or billions in the context of neural networks, then that might be impossible to store.
Luckily, `grad` already gives us a way to write an efficient Hessian-vector product function. We just have to use the identity
$\qquad \partial^2 f (x) v = \partial [x \mapsto \partial f(x) \cdot v] = \partial g(x)$,
where $g(x) = \partial f(x) \cdot v$ is a new scalar-valued function that dots the gradient of $f$ at $x$ with the vector $v$. Notice that we're only ever differentiating scalar-valued functions of vector-valued arguments, which is exactly where we know `grad` is efficient.
In JAX code, we can just write this:
```
def hvp(f, x, v):
return grad(lambda x: np.vdot(grad(f)(x), v))(x)
```
This example shows that you can freely use lexical closure, and JAX will never get perturbed or confused.
We'll check this implementation a few cells down, once we see how to compute dense Hessian matrices. We'll also write an even better version that uses both forward-mode and reverse-mode.
### Jacobians and Hessians using `jacfwd` and `jacrev`
You can compute full Jacobian matrices using the `jacfwd` and `jacrev` functions:
```
from jax import jacfwd, jacrev
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
J = jacfwd(f)(W)
print("jacfwd result, with shape", J.shape)
print(J)
J = jacrev(f)(W)
print("jacrev result, with shape", J.shape)
print(J)
```
These two functions compute the same values (up to machine numerics), but differ in their implementation: `jacfwd` uses forward-mode automatic differentiation, which is more efficient for "tall" Jacobian matrices, while `jacrev` uses reverse-mode, which is more efficient for "wide" Jacobian matrices. For matrices that are near-square, `jacfwd` probably has an edge over `jacrev`.
You can also use `jacfwd` and `jacrev` with container types:
```
def predict_dict(params, inputs):
return predict(params['W'], params['b'], inputs)
J_dict = jacrev(predict_dict)({'W': W, 'b': b}, inputs)
for k, v in J_dict.items():
print("Jacobian from {} to logits is".format(k))
print(v)
```
For more details on forward- and reverse-mode, as well as how to implement `jacfwd` and `jacrev` as efficiently as possible, read on!
Using a composition of two of these functions gives us a way to compute dense Hessian matrices:
```
def hessian(f):
return jacfwd(jacrev(f))
H = hessian(f)(W)
print("hessian, with shape", H.shape)
print(H)
```
This shape makes sense: if we start with a function $f : \mathbb{R}^n \to \mathbb{R}^m$, then at a point $x \in \mathbb{R}^n$ we expect to get the shapes
* $f(x) \in \mathbb{R}^m$, the value of $f$ at $x$,
* $\partial f(x) \in \mathbb{R}^{m \times n}$, the Jacobian matrix at $x$,
* $\partial^2 f(x) \in \mathbb{R}^{m \times n \times n}$, the Hessian at $x$,
and so on.
To implement `hessian`, we could have used `jacrev(jacrev(f))` or `jacrev(jacfwd(f))` or any other composition of the two. But forward-over-reverse is typically the most efficient. That's because in the inner Jacobian computation we're often differentiating a function with a wide Jacobian (maybe like a loss function $f : \mathbb{R}^n \to \mathbb{R}$), while in the outer Jacobian computation we're differentiating a function with a square Jacobian (since $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$), which is where forward-mode wins out.
## How it's made: two foundational autodiff functions
### Jacobian-Vector products (JVPs, aka forward-mode autodiff)
JAX includes efficient and general implementations of both forward- and reverse-mode automatic differentiation. The familiar `grad` function is built on reverse-mode, but to explain the difference in the two modes, and when each can be useful, we need a bit of math background.
#### JVPs in math
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}^m$, the Jacobian matrix of $f$ evaluated at an input point $x \in \mathbb{R}^n$, denoted $\partial f(x)$, is often thought of as a matrix in $\mathbb{R}^m \times \mathbb{R}^n$:
$\qquad \partial f(x) \in \mathbb{R}^{m \times n}$.
But we can also think of $\partial f(x)$ as a linear map, which maps the tangent space of the domain of $f$ at the point $x$ (which is just another copy of $\mathbb{R}^n$) to the tangent space of the codomain of $f$ at the point $f(x)$ (a copy of $\mathbb{R}^m$):
$\qquad \partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$.
This map is called the [pushforward map](https://en.wikipedia.org/wiki/Pushforward_(differential)) of $f$ at $x$. The Jacobian matrix is just the matrix for this linear map in a standard basis.
If we don't commit to one specific input point $x$, then we can think of the function $\partial f$ as first taking an input point and returning the Jacobian linear map at that input point:
$\qquad \partial f : \mathbb{R}^n \to \mathbb{R}^n \to \mathbb{R}^m$.
In particular, we can uncurry things so that given input point $x \in \mathbb{R}^n$ and a tangent vector $v \in \mathbb{R}^n$, we get back an output tangent vector in $\mathbb{R}^m$. We call that mapping, from $(x, v)$ pairs to output tangent vectors, the *Jacobian-vector product*, and write it as
$\qquad (x, v) \mapsto \partial f(x) v$
#### JVPs in JAX code
Back in Python code, JAX's `jvp` function models this transformation. Given a Python function that evaluates $f$, JAX's `jvp` is a way to get a Python function for evaluating $(x, v) \mapsto (f(x), \partial f(x) v)$.
```
from jax import jvp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
key, subkey = random.split(key)
v = random.normal(subkey, W.shape)
# Push forward the vector `v` along `f` evaluated at `W`
y, u = jvp(f, (W,), (v,))
```
In terms of Haskell-like type signatures, we could write
```haskell
jvp :: (a -> b) -> a -> T a -> (b, T b)
```
where we use `T a` to denote the type of the tangent space for `a`. In words, `jvp` takes as arguments a function of type `a -> b`, a value of type `a`, and a tangent vector value of type `T a`. It gives back a pair consisting of a value of type `b` and an output tangent vector of type `T b`.
The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a "JVP rule" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values.
That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 2x the cost of just evaluating the function. Put another way, for a fixed primal point $x$, we can evaluate $v \mapsto \partial f(x) \cdot v$ for about the same cost as evaluating $f$.
That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning?
To answer that, first think about how you could use a JVP to build a full Jacobian matrix. If we apply a JVP to a one-hot tangent vector, it reveals one column of the Jacobian matrix, corresponding to the nonzero entry we fed in. So we can build a full Jacobian one column at a time, and to get each column costs about the same as one function evaluation. That will be efficient for functions with "tall" Jacobians, but inefficient for "wide" Jacobians.
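To make that concrete, here's a small sketch (reusing the `f` and `W` defined above) that recovers one column of the Jacobian by pushing a one-hot tangent vector through `jvp`:
```
# The i-th column of the Jacobian of f at W is the JVP of f with the
# i-th standard basis vector as the tangent, at roughly the cost of one
# extra evaluation of f.
i = 0
e_i = np.eye(len(W))[i]            # one-hot tangent vector
_, jac_col = jvp(f, (W,), (e_i,))
print(jac_col)
print(jacfwd(f)(W)[:, i])          # matches the i-th column of the full Jacobian
```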
If you're doing gradient-based optimization in machine learning, you probably want to minimize a loss function from parameters in $\mathbb{R}^n$ to a scalar loss value in $\mathbb{R}$. That means the Jacobian of this function is a very wide matrix: $\partial f(x) \in \mathbb{R}^{1 \times n}$, which we often identify with the Gradient vector $\nabla f(x) \in \mathbb{R}^n$. Building that matrix one column at a time, with each call taking a similar number of FLOPs to evaluating the original function, sure seems inefficient! In particular, for training neural networks, where $f$ is a training loss function and $n$ can be in the millions or billions, this approach just won't scale.
To do better for functions like this, we just need to use reverse-mode.
### Vector-Jacobian products (VJPs, aka reverse-mode autodiff)
Where forward-mode gives us back a function for evaluating Jacobian-vector products, which we can then use to build Jacobian matrices one column at a time, reverse-mode is a way to get back a function for evaluating vector-Jacobian products (equivalently Jacobian-transpose-vector products), which we can use to build Jacobian matrices one row at a time.
#### VJPs in math
Let's again consider a function $f : \mathbb{R}^n \to \mathbb{R}^m$.
Starting from our notation for JVPs, the notation for VJPs is pretty simple:
$\qquad (x, v) \mapsto v \partial f(x)$,
where $v$ is an element of the cotangent space of $f$ at $x$ (isomorphic to another copy of $\mathbb{R}^m$). When being rigorous, we should think of $v$ as a linear map $v : \mathbb{R}^m \to \mathbb{R}$, and when we write $v \partial f(x)$ we mean function composition $v \circ \partial f(x)$, where the types work out because $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$. But in the common case we can identify $v$ with a vector in $\mathbb{R}^m$ and use the two almost interchangeably, just like we might sometimes flip between "column vectors" and "row vectors" without much comment.
With that identification, we can alternatively think of the linear part of a VJP as the transpose (or adjoint conjugate) of the linear part of a JVP:
$\qquad (x, v) \mapsto \partial f(x)^\mathsf{T} v$.
For a given point $x$, we can write the signature as
$\qquad \partial f(x)^\mathsf{T} : \mathbb{R}^m \to \mathbb{R}^n$.
The corresponding map on cotangent spaces is often called the [pullback](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))
of $f$ at $x$. The key for our purposes is that it goes from something that looks like the output of $f$ to something that looks like the input of $f$, just like we might expect from a transposed linear function.
#### VJPs in JAX code
Switching from math back to Python, the JAX function `vjp` can take a Python function for evaluating $f$ and give us back a Python function for evaluating the VJP $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$.
```
from jax import vjp
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
y, vjp_fun = vjp(f, W)
key, subkey = random.split(key)
u = random.normal(subkey, y.shape)
# Pull back the covector `u` along `f` evaluated at `W`
v = vjp_fun(u)
```
In terms of Haskell-like type signatures, we could write
```haskell
vjp :: (a -> b) -> a -> (b, CT b -> CT a)
```
where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`.
This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ is only about twice the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \mathbb{R}^n \to \mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.
There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!).
For more on how reverse-mode works, see [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/).
### Hessian-vector products using both forward- and reverse-mode
In a previous section, we implemented a Hessian-vector product function just using reverse-mode:
```
def hvp(f, x, v):
return grad(lambda x: np.vdot(grad(f)(x), v))(x)
```
That's efficient, but we can do even better and save some memory by using forward-mode together with reverse-mode.
Mathematically, given a function $f : \mathbb{R}^n \to \mathbb{R}$ to differentiate, a point $x \in \mathbb{R}^n$ at which to linearize the function, and a vector $v \in \mathbb{R}^n$, the Hessian-vector product function we want is
$(x, v) \mapsto \partial^2 f(x) v$
Consider the helper function $g : \mathbb{R}^n \to \mathbb{R}^n$ defined to be the derivative (or gradient) of $f$, namely $g(x) = \partial f(x)$. All we need is its JVP, since that will give us
$(x, v) \mapsto \partial g(x) v = \partial^2 f(x) v$.
We can translate that almost directly into code:
```
from jax import jvp, grad
# forward-over-reverse
def hvp(f, primals, tangents):
return jvp(grad(f), primals, tangents)[1]
```
Even better, since we didn't have to call `np.dot` directly, this `hvp` function works with arrays of any shape and with arbitrary container types (like vectors stored as nested lists/dicts/tuples), and doesn't even have a dependence on `jax.numpy`.
Here's an example of how to use it:
```
def f(X):
return np.sum(np.tanh(X)**2)
key, subkey1, subkey2 = random.split(key, 3)
X = random.normal(subkey1, (30, 40))
V = random.normal(subkey2, (30, 40))
ans1 = hvp(f, (X,), (V,))
ans2 = np.tensordot(hessian(f)(X), V, 2)
print(np.allclose(ans1, ans2, 1e-4, 1e-4))
```
Another way you might consider writing this is using reverse-over-forward:
```
# reverse-over-forward
def hvp_revfwd(f, primals, tangents):
g = lambda primals: jvp(f, primals, tangents)[1]
return grad(g)(primals)
```
That's not quite as good, though, because forward-mode has less overhead than reverse-mode, and since the outer differentiation operator here has to differentiate a larger computation than the inner one, keeping forward-mode on the outside works best:
```
# reverse-over-reverse, only works for single arguments
def hvp_revrev(f, primals, tangents):
x, = primals
v, = tangents
return grad(lambda x: np.vdot(grad(f)(x), v))(x)
print("Forward over reverse")
%timeit -n10 -r3 hvp(f, (X,), (V,))
print("Reverse over forward")
%timeit -n10 -r3 hvp_revfwd(f, (X,), (V,))
print("Reverse over reverse")
%timeit -n10 -r3 hvp_revrev(f, (X,), (V,))
print("Naive full Hessian materialization")
%timeit -n10 -r3 np.tensordot(hessian(f)(X), V, 2)
```
## Composing VJPs, JVPs, and `vmap`
### Jacobian-Matrix and Matrix-Jacobian products
Now that we have `jvp` and `vjp` transformations that give us functions to push-forward or pull-back single vectors at a time, we can use JAX's [`vmap` transformation](https://github.com/google/jax#auto-vectorization-with-vmap) to push and pull entire bases at once. In particular, we can use that to write fast matrix-Jacobian and Jacobian-matrix products.
```
# Isolate the function from the weight matrix to the predictions
f = lambda W: predict(W, b, inputs)
# Pull back the covectors `m_i` along `f`, evaluated at `W`, for all `i`.
# First, use a list comprehension to loop over rows in the matrix M.
def loop_mjp(f, x, M):
y, vjp_fun = vjp(f, x)
return np.vstack([vjp_fun(mi) for mi in M])
# Now, use vmap to build a computation that does a single fast matrix-matrix
# multiply, rather than an outer loop over vector-matrix multiplies.
def vmap_mjp(f, x, M):
y, vjp_fun = vjp(f, x)
return vmap(vjp_fun)(M)
key = random.PRNGKey(0)
num_covecs = 128
U = random.normal(key, (num_covecs,) + y.shape)
loop_vs = loop_mjp(f, W, M=U)
print('Non-vmapped Matrix-Jacobian product')
%timeit -n10 -r3 loop_mjp(f, W, M=U)
print('\nVmapped Matrix-Jacobian product')
vmap_vs = vmap_mjp(f, W, M=U)
%timeit -n10 -r3 vmap_mjp(f, W, M=U)
assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Matrix-Jacobian Products should be identical'
def loop_jmp(f, x, M):
# jvp immediately returns the primal and tangent values as a tuple,
# so we'll compute and select the tangents in a list comprehension
return np.vstack([jvp(f, (W,), (si,))[1] for si in S])
def vmap_jmp(f, x, M):
_jvp = lambda s: jvp(f, (W,), (s,))[1]
return vmap(_jvp)(M)
num_vecs = 128
S = random.normal(key, (num_vecs,) + W.shape)
loop_vs = loop_jmp(f, W, M=S)
print('Non-vmapped Jacobian-Matrix product')
%timeit -n10 -r3 loop_jmp(f, W, M=S)
vmap_vs = vmap_jmp(f, W, M=S)
print('\nVmapped Jacobian-Matrix product')
%timeit -n10 -r3 vmap_jmp(f, W, M=S)
assert np.allclose(loop_vs, vmap_vs), 'Vmap and non-vmapped Jacobian-Matrix products should be identical'
```
### The implementation of `jacfwd` and `jacrev`
Now that we've seen fast Jacobian-matrix and matrix-Jacobian products, it's not hard to guess how to write `jacfwd` and `jacrev`. We just use the same technique to push-forward or pull-back an entire standard basis (isomorphic to an identity matrix) at once.
```
from jax import jacrev as builtin_jacrev
def our_jacrev(f):
def jacfun(x):
y, vjp_fun = vjp(f, x)
# Use vmap to do a matrix-Jacobian product.
# Here, the matrix is the Euclidean basis, so we get all
# entries in the Jacobian at once.
J, = vmap(vjp_fun, in_axes=0)(np.eye(len(y)))
return J
return jacfun
assert np.allclose(builtin_jacrev(f)(W), our_jacrev(f)(W)), 'Incorrect reverse-mode Jacobian results!'
from jax import jacfwd as builtin_jacfwd
def our_jacfwd(f):
def jacfun(x):
_jvp = lambda s: jvp(f, (x,), (s,))[1]
Jt = vmap(_jvp, in_axes=1)(np.eye(len(x)))
return np.transpose(Jt)
return jacfun
assert np.allclose(builtin_jacfwd(f)(W), our_jacfwd(f)(W)), 'Incorrect forward-mode Jacobian results!'
```
Interestingly, [Autograd](https://github.com/hips/autograd) couldn't do this. Our [implementation of reverse-mode `jacobian` in Autograd](https://github.com/HIPS/autograd/blob/96a03f44da43cd7044c61ac945c483955deba957/autograd/differential_operators.py#L60) had to pull back one vector at a time with an outer-loop `map`. Pushing one vector at a time through the computation is much less efficient than batching it all together with `vmap`.
Another thing that Autograd couldn't do is `jit`. No matter how much Python dynamism you use in the function to be differentiated, we can always use `jit` on the linear part of the computation. For example:
```
def f(x):
try:
if x < 3:
return 2 * x ** 3
else:
raise ValueError
except ValueError:
return np.pi * x
y, f_vjp = vjp(f, 4.)
print(jit(f_vjp)(1.))
```
## Complex numbers and differentiation
JAX is great at complex numbers and differentiation. To support both [holomorphic and non-holomorphic differentiation](https://en.wikipedia.org/wiki/Holomorphic_function), JAX follows [Autograd's convention](https://github.com/HIPS/autograd/blob/master/docs/tutorial.md#complex-numbers) for encoding complex derivatives.
Consider a complex-to-complex function $f: \mathbb{C} \to \mathbb{C}$ that we break down into its component real-to-real functions:
```
def f(z):
    x, y = real(z), imag(z)
    return u(x, y) + v(x, y) * 1j
```
That is, we've decomposed $f(z) = u(x, y) + v(x, y) i$ where $z = x + y i$. We define `grad(f)` to correspond to
```
def grad_f(z):
x, y = real(z), imag(z)
return grad(u, 0)(x, y) + grad(u, 1)(x, y) * 1j
```
In math symbols, that means we define $\partial f(z) \triangleq \partial_0 u(x, y) + \partial_1 u(x, y) i$. So we throw out $v$, ignoring the complex component function of $f$ entirely!
This convention covers three important cases:
1. If `f` evaluates a holomorphic function, then we get the usual complex derivative, since $\partial_0 u = \partial_1 v$ and $\partial_1 u = - \partial_0 v$.
2. If `f` evaluates the real-valued loss function of a complex parameter `x`, then we get a result that we can use in gradient-based optimization by taking steps in the direction of the conjugate of `grad(f)(x)`.
3. If `f` evaluates a real-to-real function, but its implementation uses complex primitives internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions), then we get the same result that an implementation using only real primitives would have given.
By throwing away `v` entirely, this convention does not handle the case where `f` evaluates a non-holomorphic function and you want to evaluate all of $\partial_0 u$, $\partial_1 u$, $\partial_0 v$, and $\partial_1 v$ at once. But in that case the answer would have to contain four real values, and so there's no way to express it as a single complex number.
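As a quick, hedged sanity check of case 1 (not part of the original notebook; the input value is made up), the complex derivative of the holomorphic function $z \mapsto z^3$ is $3z^2$, and `grad` with `holomorphic=True` recovers it:
```
# Sketch only: verify the holomorphic case on z -> z**3 at an arbitrary point.
z0 = 2.0 + 1.0j
df = grad(lambda z: z ** 3, holomorphic=True)(z0)
print(df)           # expected to match 3 * z0**2 = 9 + 12j
print(3 * z0 ** 2)
```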
You should expect complex numbers to work everywhere in JAX. Here's differentiating through a Cholesky decomposition of a complex matrix:
```
A = np.array([[5., 2.+3j, 5j],
[2.-3j, 7., 1.+7j],
[-5j, 1.-7j, 12.]])
def f(X):
L = np.linalg.cholesky(X)
return np.sum((L - np.sin(L))**2)
grad(f, holomorphic=True)(A)
```
For primitives' JVP rules, writing the primals as $z = a + bi$ and the tangents as $t = c + di$, we define the Jacobian-vector product $t \mapsto \partial f(z) \cdot t$ as
$t \mapsto
\begin{matrix} \begin{bmatrix} 1 & 1 \end{bmatrix} \\ ~ \end{matrix}
\begin{bmatrix} \partial_0 u(a, b) & \partial_1 u(a, b) \\ \partial_0 v(a, b) i & \partial_1 v(a, b) i \end{bmatrix}
\begin{bmatrix} c \\ d \end{bmatrix}$.
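As a small worked check (added for illustration; not in the original text), take the non-holomorphic conjugation function $f(z) = \bar{z}$, so that $u(x, y) = x$ and $v(x, y) = -y$. The rule above gives
$t \mapsto
\begin{matrix} \begin{bmatrix} 1 & 1 \end{bmatrix} \\ ~ \end{matrix}
\begin{bmatrix} 1 & 0 \\ 0 & -i \end{bmatrix}
\begin{bmatrix} c \\ d \end{bmatrix} = c - d i = \bar{t},$
which is exactly the pushforward of conjugation applied to the tangent $t = c + di$.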
See Chapter 4 of [Dougal's PhD thesis](https://dougalmaclaurin.com/phd-thesis.pdf) for more details.
## More advanced autodiff
In this notebook, we worked through some easy, and then progressively more complicated, applications of automatic differentiation in JAX. We hope you now feel that taking derivatives in JAX is easy and powerful.
There's a whole world of other autodiff tricks and functionality out there. Topics we didn't cover, but hope to cover in an "Advanced Autodiff Cookbook", include:
- Gauss-Newton Vector Products, linearizing once
- Custom VJPs and JVPs
- Efficient derivatives at fixed-points
- Estimating the trace of a Hessian using random Hessian-vector products.
- Forward-mode autodiff using only reverse-mode autodiff.
- Taking derivatives with respect to custom data types.
- Checkpointing (binomial checkpointing for efficient reverse-mode, not model snapshotting).
- Optimizing VJPs with Jacobian pre-accumulation.
# Plot Starlet planes with PyWI-CTA
[](https://mybinder.org/v2/gh/jeremiedecock/pywi-cta-notebooks/master?filepath=tuto_2b_plot_starlet_planes.ipynb)
## Import required modules
This notebook requires PyWI-CTA for the I/O.
```
%matplotlib inline
import matplotlib.pyplot as plt
import pywicta
import pywicta.data
from pywicta.io import geometry_converter
from pywicta.io.images import image_generator
from pywicta.io.images import plot_ctapipe_image
from pywicta.io.images import plot_hillas_parameters_on_axes
from pywicta.denoising import starlet
from pywicta.denoising.starlet import WaveletTransform
from pywicta.denoising import inverse_transform_sampling
from pywicta.denoising.inverse_transform_sampling import EmpiricalDistribution
print(pywicta.get_version())
```
## Get image
The next cells define the list of images to use in this notebook.
Images can be fetched from Fits files or from Simtel files.
Fits files are much lighter and much faster to process than Simtel files, but they are specific to PyWI-CTA, so you first have to generate them from Simtel files using [the following script](). Also, contrary to Simtel files, a Fits file contains only one "image" (i.e. a unique event viewed from a single telescope).
```
#pywicta.data.lst(["lst_gamma_r104_prod3b-north_t1_e1016403_660GeV", "lst_gamma_r104_prod3b-north_t1_e6905_87GeV"])
pywicta.data.lst()
#CAM_ID = "ASTRICam"
#CAM_ID = "CHEC"
#CAM_ID = "DigiCam"
#CAM_ID = "FlashCam"
#CAM_ID = "NectarCam"
CAM_ID = "LSTCam"
IMG_PATH_LIST = pywicta.data.lst(["lst_gamma_r104_prod3b-north_t1_e1016403_660GeV"])
image_gen = image_generator(IMG_PATH_LIST,
cam_filter_list=[CAM_ID],
ctapipe_format=False)
img = next(image_gen)
img.meta
```
## Plot
Images coming from FITS files are stored in 2D arrays (mainly because the Sparse2D wavelet transform expects images stored in 2D arrays).
But ctapipe uses a special 1D array representation for IACT images (defined in "geom" objects).
Thus a conversion has to be done (using the "geometry_converter" module) before applying ctapipe functions to images coming from Fits files.
```
geom1d = geometry_converter.get_geom1d(CAM_ID)
img_input_1d = geometry_converter.image_2d_to_1d(img.input_image, CAM_ID)
disp = plot_ctapipe_image(img_input_1d,
geom1d,
title='Noised image',
norm='lin',
plot_axis=False)
```
## Clean the image using the Starlet transform
### Get empirical noise distribution
```
use_noise_distribution = True
if use_noise_distribution:
noise_cdf_file = inverse_transform_sampling.get_cdf_file_path(CAM_ID) # pywicta.denoising.cdf.LSTCAM_CDF_FILE
print(noise_cdf_file)
noise_distribution = EmpiricalDistribution(noise_cdf_file)
else:
noise_distribution = None
```
### Setup the Starlet transform
Feel free to change the following default setup to get a better understanding of the Starlet transform...
```
type_of_filtering = 'hard_filtering'
filter_thresholds = (3., 0.2)
clusters_threshold = 0.2
last_scale_treatment = 'drop'
detect_only_positive_structures = False
kill_isolated_pixels = True
```
### Compute the Starlet transform and plot the Starlet planes
```
number_of_scales = len(filter_thresholds) + 1
in_planes = starlet.wavelet_transform(img.input_image,
number_of_scales=number_of_scales,
noise_distribution=noise_distribution)
filtered_in_planes = starlet.filter_planes(in_planes,
method=type_of_filtering,
thresholds=filter_thresholds,
detect_only_positive_structures=detect_only_positive_structures)
for plane_index, (plane, filtered_plane) in enumerate(zip(in_planes, filtered_in_planes)):
if plane_index < len(in_planes) - 1:
significant_pixels_mask = (geometry_converter.image_2d_to_1d(filtered_plane, CAM_ID) != 0)
else:
significant_pixels_mask = None
if plane_index == 0:
title_comment = " (smallest scale)"
elif plane_index == len(in_planes) - 2:
title_comment = ' (largest scale)'
elif plane_index == len(in_planes) - 1:
title_comment = ' ("coarse" a.k.a. "residuals")'
else:
title_comment = ""
disp = plot_ctapipe_image(geometry_converter.image_2d_to_1d(plane, CAM_ID),
geom1d,
title='Plane {}{}'.format(plane_index + 1, title_comment),
norm='lin',
highlight_mask=significant_pixels_mask,
plot_axis=False)
```
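As an optional next step (a minimal sketch, not part of the original notebook), the cleaned image can be rebuilt by summing the filtered planes while dropping the last "coarse" plane, which is what the `last_scale_treatment = 'drop'` setting above suggests:
```
# Sketch only: reconstruct the denoised image from the filtered Starlet planes.
import numpy as np

# Drop the last ("coarse" a.k.a. "residuals") plane, then sum the remaining filtered planes.
# (The full pipeline may additionally remove isolated pixels, per `kill_isolated_pixels` above.)
cleaned_img_2d = np.sum(filtered_in_planes[:-1], axis=0)

disp = plot_ctapipe_image(geometry_converter.image_2d_to_1d(cleaned_img_2d, CAM_ID),
                          geom1d,
                          title='Cleaned image (sum of filtered planes)',
                          norm='lin',
                          plot_axis=False)
```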
# Financial Planning with APIs and Simulations
In this Challenge, you’ll create two financial analysis tools by using a single Jupyter notebook:
Part 1: A financial planner for emergencies. The members will be able to use this tool to visualize their current savings. The members can then determine if they have enough reserves for an emergency fund.
Part 2: A financial planner for retirement. This tool will forecast the performance of their retirement portfolio in 30 years. To do this, the tool will make an Alpaca API call via the Alpaca SDK to get historical price data for use in Monte Carlo simulations.
You’ll use the information from the Monte Carlo simulation to answer questions about the portfolio in your Jupyter notebook.
```
# Import the required libraries and dependencies
import os
import requests
import json
import pandas as pd
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
from MCForecastTools import MCSimulation
%matplotlib inline
# Load the environment variables from the .env file
#by calling the load_dotenv function
load_dotenv()
```
## Part 1: Create a Financial Planner for Emergencies
### Evaluate the Cryptocurrency Wallet by Using the Requests Library
In this section, you’ll determine the current value of a member’s cryptocurrency wallet. You’ll collect the current prices for the Bitcoin and Ethereum cryptocurrencies by using the Python Requests library. For the prototype, you’ll assume that the member holds 1.2 Bitcoins (BTC) and 5.3 Ethereum coins (ETH). To do all this, complete the following steps:
1. Create a variable named `monthly_income`, and set its value to `12000`.
2. Use the Requests library to get the current price (in US dollars) of Bitcoin (BTC) and Ethereum (ETH) by using the API endpoints that the starter code supplies.
3. Navigate the JSON response object to access the current price of each coin, and store each in a variable.
> **Hint** Note the specific identifier for each cryptocurrency in the API JSON response. The Bitcoin identifier is `1`, and the Ethereum identifier is `1027`.
4. Calculate the value, in US dollars, of the current amount of each cryptocurrency and of the entire cryptocurrency wallet.
```
# The current number of coins for each cryptocurrency asset held in the portfolio.
btc_coins = 1.2
eth_coins = 5.3
```
#### Step 1: Create a variable named `monthly_income`, and set its value to `12000`.
```
# The monthly amount for the member's household income
monthly_income = 12000
```
#### Review the endpoint URLs for the API calls to Free Crypto API in order to get the current pricing information for both BTC and ETH.
```
# The Free Crypto API Call endpoint URLs for the held cryptocurrency assets
btc_url = "https://api.alternative.me/v2/ticker/Bitcoin/?convert=USD"
eth_url = "https://api.alternative.me/v2/ticker/Ethereum/?convert=USD"
```
#### Step 2. Use the Requests library to get the current price (in US dollars) of Bitcoin (BTC) and Ethereum (ETH) by using the API endpoints that the starter code supplied.
```
# Using the Python requests library, make an API call to access the current price of BTC
btc_response = requests.get(btc_url).json()
# Use the json.dumps function to review the response data from the API call
# Use the indent and sort_keys parameters to make the response object readable
print(json.dumps(btc_response, indent=4, sort_keys=True))
# Using the Python requests library, make an API call to access the current price ETH
eth_response = requests.get(eth_url).json()
# Use the json.dumps function to review the response data from the API call
# Use the indent and sort_keys parameters to make the response object readable
print(json.dumps(eth_response, indent=4, sort_keys=True))
```
#### Step 3: Navigate the JSON response object to access the current price of each coin, and store each in a variable.
```
# Navigate the BTC response object to access the current price of BTC
btc_price = btc_response['data']['1']['quotes']['USD']['price']
# Print the current price of BTC
print(f"BTC Current Price: ${btc_price:,.2f}")
# Navigate the ETH response object to access the current price of ETH
eth_price = eth_response['data']['1027']['quotes']['USD']['price']
# Print the current price of ETH
print(f"ETH Current Price: ${eth_price:,.2f}")
```
### Step 4: Calculate the value, in US dollars, of the current amount of each cryptocurrency and of the entire cryptocurrency wallet.
```
# Compute the current value of the BTC holding
btc_value = btc_coins * btc_price
# Print current value of your holding in BTC
print(f"Current BTC Holding: ${btc_value:,.2f}")
# Compute the current value of the ETH holding
eth_value = eth_coins * eth_price
# Print current value of your holding in ETH
print(f"Current ETH Holding: ${eth_value:,.2f}")
# Compute the total value of the cryptocurrency wallet
# Add the value of the BTC holding to the value of the ETH holding
total_crypto_wallet = btc_value + eth_value
# Print current cryptocurrency wallet balance
print(f"Cryptocurrency Wallet Balance: ${total_crypto_wallet:,.2f}")
```
### Evaluate the Stock and Bond Holdings by Using the Alpaca SDK
In this section, you’ll determine the current value of a member’s stock and bond holdings. You’ll make an API call to Alpaca via the Alpaca SDK to get the current closing prices of the SPDR S&P 500 ETF Trust (ticker: SPY) and of the iShares Core US Aggregate Bond ETF (ticker: AGG). For the prototype, assume that the member holds 110 shares of SPY, which represents the stock portion of their portfolio, and 200 shares of AGG, which represents the bond portion. To do all this, complete the following steps:
1. In the `Starter_Code` folder, create an environment file (`.env`) to store the values of your Alpaca API key and Alpaca secret key.
2. Set the variables for the Alpaca API and secret keys. Using the Alpaca SDK, create the Alpaca `tradeapi.REST` object. In this object, include the parameters for the Alpaca API key, the secret key, and the version number.
3. Set the following parameters for the Alpaca API call:
- `tickers`: Use the tickers for the member’s stock and bond holdings.
- `timeframe`: Use a time frame of one day.
- `start_date` and `end_date`: Use the same date for these parameters, and format them with the date of the previous weekday (or `2020-08-07`). This is because you want the one closing price for the most-recent trading day.
4. Get the current closing prices for `SPY` and `AGG` by using the Alpaca `get_barset` function. Format the response as a Pandas DataFrame by including the `df` property at the end of the `get_barset` function.
5. Navigating the Alpaca response DataFrame, select the `SPY` and `AGG` closing prices, and store them as variables.
6. Calculate the value, in US dollars, of the current amount of shares in each of the stock and bond portions of the portfolio, and print the results.
#### Review the total number of shares held in both (SPY) and (AGG).
```
# Current amount of shares held in both the stock (SPY) and bond (AGG) portion of the portfolio.
spy_shares = 110
agg_shares = 200
```
#### Step 1: In the `Starter_Code` folder, create an environment file (`.env`) to store the values of your Alpaca API key and Alpaca secret key.
#### Step 2: Set the variables for the Alpaca API and secret keys. Using the Alpaca SDK, create the Alpaca `tradeapi.REST` object. In this object, include the parameters for the Alpaca API key, the secret key, and the version number.
```
# Set the variables for the Alpaca API and secret keys
alpaca_api_key = os.getenv('ALPACA_API_KEY')
alpaca_secret_key = os.getenv('ALPACA_SECRET_KEY')
# Create the Alpaca tradeapi.REST object
alpaca = tradeapi.REST(
alpaca_api_key,
alpaca_secret_key,
api_version='v2'
)
# Print the types of the keys to verify that they were loaded
print(f"Alpaca Key type: {type(alpaca_api_key)}")
print(f"Alpaca Secret Key type: {type(alpaca_secret_key)}")
```
#### Step 3: Set the following parameters for the Alpaca API call:
- `tickers`: Use the tickers for the member’s stock and bond holdings.
- `timeframe`: Use a time frame of one day.
- `start_date` and `end_date`: Use the same date for these parameters, and format them with the date of the previous weekday (or `2020-08-07`). This is because you want the one closing price for the most-recent trading day.
```
# Set the tickers for both the bond and stock portion of the portfolio
tickers = ['SPY','AGG']
# Set timeframe to 1D
timeframe = "1D"
# Format current date as ISO format
# Set both the start and end date at the date of your prior weekday
# This will give you the closing price of the previous trading day
# Alternatively you can use a start and end date of 2020-08-07
start_date = pd.Timestamp("2021-12-31", tz="America/New_York").isoformat()
end_date = pd.Timestamp("2021-12-31", tz="America/New_York").isoformat()
```
#### Step 4: Get the current closing prices for `SPY` and `AGG` by using the Alpaca `get_barset` function. Format the response as a Pandas DataFrame by including the `df` property at the end of the `get_barset` function.
```
# Use the Alpaca get_barset function to get the current closing prices for the portfolio
# Be sure to set the `df` property after the function to format the response object as a DataFrame
df_portfolio = alpaca.get_barset(
tickers,
timeframe,
start = start_date,
end = end_date
).df
# Review the first 5 rows of the Alpaca DataFrame
df_portfolio.head()
```
#### Step 5: Navigating the Alpaca response DataFrame, select the `SPY` and `AGG` closing prices, and store them as variables.
```
# Access the closing price for AGG from the Alpaca DataFrame
# Converting the value to a floating point number
agg_close_price = df_portfolio['AGG']['close'][0]
# Print the AGG closing price
print(f"AGG Closing Price for 2022-01-06: ${agg_close_price:,.2f}")
# Access the closing price for SPY from the Alpaca DataFrame
# Converting the value to a floating point number
spy_close_price = df_portfolio['SPY']['close'][0]
# Print the SPY closing price
print(f"SPY Closing Price for 2022-01-06: ${spy_close_price:,.2f}")
```
#### Step 6: Calculate the value, in US dollars, of the current amount of shares in each of the stock and bond portions of the portfolio, and print the results.
```
# Calculate the current value of the bond portion of the portfolio
agg_value = agg_close_price * agg_shares
# Print the current value of the bond portfolio
print(f"Bond Portfolio Value: ${agg_value:,.2f}")
# Calculate the current value of the stock portion of the portfolio
spy_value = spy_close_price * spy_shares
# Print the current value of the stock portfolio
print(f"Bond Portfolio Value: ${spy_value:,.2f}")
# Calculate the total value of the stock and bond portion of the portfolio
total_stocks_bonds = agg_value + spy_value
# Print the current balance of the stock and bond portion of the portfolio
print(f"Combined Stock & Bond Portfolio "
f"Value: ${total_stocks_bonds:,.2f}")
# Calculate the total value of the member's entire savings portfolio
# Add the value of the cryptocurrency wallet to the value of the total stocks and bonds
total_portfolio = total_crypto_wallet + total_stocks_bonds
# Print the total value of the member's entire savings portfolio
print(f"Total Assets: ${total_portfolio:,.2f}")
```
### Evaluate the Emergency Fund
In this section, you’ll use the valuations for the cryptocurrency wallet and for the stock and bond portions of the portfolio to determine if the credit union member has enough savings to build an emergency fund into their financial plan. To do this, complete the following steps:
1. Create a Python list named `savings_data` that has two elements. The first element contains the total value of the cryptocurrency wallet. The second element contains the total value of the stock and bond portions of the portfolio.
2. Use the `savings_data` list to create a Pandas DataFrame named `savings_df`, and then display this DataFrame. The function to create the DataFrame should take the following three parameters:
- `savings_data`: Use the list that you just created.
- `columns`: Set this parameter equal to a Python list with a single value called `amount`.
- `index`: Set this parameter equal to a Python list with the values of `crypto` and `stock/bond`.
3. Use the `savings_df` DataFrame to plot a pie chart that visualizes the composition of the member’s portfolio. The y-axis of the pie chart uses `amount`. Be sure to add a title.
4. Using Python, determine if the current portfolio has enough to create an emergency fund as part of the member’s financial plan. Ideally, an emergency fund should equal to three times the member’s monthly income. To do this, implement the following steps:
1. Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of $12000. (You set this earlier in Part 1).
2. Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio:
1. If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund.
2. Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal.
3. Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.)
#### Step 1: Create a Python list named `savings_data` that has two elements. The first element contains the total value of the cryptocurrency wallet. The second element contains the total value of the stock and bond portions of the portfolio.
```
# Consolidate financial assets data into a Python list
savings_data = [total_crypto_wallet, total_stocks_bonds]
# Review the Python list savings_data
savings_data
```
#### Step 2: Use the `savings_data` list to create a Pandas DataFrame named `savings_df`, and then display this DataFrame. The function to create the DataFrame should take the following three parameters:
- `savings_data`: Use the list that you just created.
- `columns`: Set this parameter equal to a Python list with a single value called `amount`.
- `index`: Set this parameter equal to a Python list with the values of `crypto` and `stock/bond`.
```
# Create a Pandas DataFrame called savings_df
savings_df = pd.DataFrame(
savings_data,
columns=['amount'],
    index=['crypto', 'stock/bond']
)
# Display the savings_df DataFrame
savings_df
```
#### Step 3: Use the `savings_df` DataFrame to plot a pie chart that visualizes the composition of the member’s portfolio. The y-axis of the pie chart uses `amount`. Be sure to add a title.
```
# Plot the total value of the member's portfolio (crypto and stock/bond) in a pie chart
savings_df.plot.pie(
y="amount",
figsize=(8,12),
title="Composition of Member's Portfolio"
);
```
#### Step 4: Using Python, determine if the current portfolio has enough to create an emergency fund as part of the member’s financial plan. Ideally, an emergency fund should equal to three times the member’s monthly income. To do this, implement the following steps:
Step 1. Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of 12000. (You set this earlier in Part 1).
Step 2. Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio:
* If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund.
* Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal.
* Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.)
##### Step 4-1: Create a variable named `emergency_fund_value`, and set it equal to three times the value of the member’s `monthly_income` of 12000. (You set this earlier in Part 1).
```
# Create a variable named emergency_fund_value
emergency_fund_value = monthly_income * 3
```
##### Step 4-2: Create a series of three if statements to determine if the member’s total portfolio is large enough to fund the emergency portfolio:
* If the total portfolio value is greater than the emergency fund value, display a message congratulating the member for having enough money in this fund.
* Else if the total portfolio value is equal to the emergency fund value, display a message congratulating the member on reaching this important financial goal.
* Else the total portfolio is less than the emergency fund value, so display a message showing how many dollars away the member is from reaching the goal. (Subtract the total portfolio value from the emergency fund value.)
```
# Evaluate the possibility of creating an emergency fund with 3 conditions:
if total_portfolio > emergency_fund_value:
print("Congratulations, your portfolio is able to fund your emergency account.")
elif total_portfolio == emergency_fund_value:
print("Congratulations, your total portfolio equals the amount need to fund your emergency account.")
else:
shortfall = emergency_fund_value - total_portfolio
print(f"You are $ {shortfall:,.2f} away from reaching your goal.")
```
## Part 2: Create a Financial Planner for Retirement
### Create the Monte Carlo Simulation
In this section, you’ll use the MCForecastTools library to create a Monte Carlo simulation for the member’s savings portfolio. To do this, complete the following steps:
1. Make an API call via the Alpaca SDK to get 3 years of historical closing prices for a traditional 60/40 portfolio split: 60% stocks (SPY) and 40% bonds (AGG).
2. Run a Monte Carlo simulation of 500 samples and 30 years for the 60/40 portfolio, and then plot the results. The following image shows the overlay line plot resulting from a simulation with these characteristics. However, because a random number generator is used to run each live Monte Carlo simulation, your image will differ slightly from this exact image:

3. Plot the probability distribution of the Monte Carlo simulation. The following image shows the histogram plot resulting from a simulation with these characteristics. However, because a random number generator is used to run each live Monte Carlo simulation, your image will differ slightly from this exact image:

4. Generate the summary statistics for the Monte Carlo simulation.
#### Step 1: Make an API call via the Alpaca SDK to get 3 years of historical closing prices for a traditional 60/40 portfolio split: 60% stocks (SPY) and 40% bonds (AGG).
```
# Set start and end dates of 3 years back from your current date
# Alternatively, you can use an end date of 2020-08-07 and work 3 years back from that date
start_date = pd.Timestamp('2018-12-31', tz='America/New_York').isoformat()
end_date = pd.Timestamp('2021-12-31', tz='America/New_York').isoformat()
# Set number of rows to 1000 to retrieve the maximum amount of rows
limit_rows = 1000
# Use the Alpaca get_barset function to make the API call to get the 3 years worth of pricing data
# The tickers and timeframe parameters should have been set in Part 1 of this activity
# The start and end dates should be updated with the information set above
# Remember to add the df property to the end of the call so the response is returned as a DataFrame
df_three_years = alpaca.get_barset(
tickers,
timeframe,
start=start_date,
end=end_date,
limit=limit_rows
).df
# Setting the date element as index
df_three_years.index = df_three_years.index.date
# Display both the first and last five rows of the DataFrame
display(df_three_years.head())
display(df_three_years.tail())
```
#### Step 2: Run a Monte Carlo simulation of 500 samples and 30 years for the 60/40 portfolio, and then plot the results.
```
# Configure the Monte Carlo simulation to forecast 30 years cumulative returns
# The weights should be split 40% to AGG and 60% to SPY.
# Run 500 samples.
MCS_30 = MCSimulation(
portfolio_data= df_three_years,
weights= [0.4, 0.6],
num_simulation= 500,
num_trading_days= 252 * 30
)
# Review the simulation input data
MCS_30.portfolio_data.head()
# Run the Monte Carlo simulation to forecast 30 years cumulative returns
MCS_30.calc_cumulative_return()
# Visualize the 30-year Monte Carlo simulation by creating an
# overlay line plot
MCS_30.plot_simulation();
```
#### Step 3: Plot the probability distribution of the Monte Carlo simulation.
```
# Visualize the probability distribution of the 30-year Monte Carlo simulation
# by plotting a histogram
MCS_30.plot_distribution();
```
#### Step 4: Generate the summary statistics for the Monte Carlo simulation.
```
# Generate summary statistics from the 30-year Monte Carlo simulation results
# Save the results as a variable
MC_summary_statistics = MCS_30.summarize_cumulative_return()
# Review the 30-year Monte Carlo summary statistics
print(MC_summary_statistics)
```
### Analyze the Retirement Portfolio Forecasts
Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the Monte Carlo simulation, answer the following question in your Jupyter notebook:
- What are the lower and upper bounds for the expected value of the portfolio with a 95% confidence interval?
```
# Print the current balance of the stock and bond portion of the member's portfolio
print(f"Current Stock & Bond Portfolio value: ${total_stocks_bonds:,.2f}")
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes for the current stock/bond portfolio
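# Note (added for clarity): summarize_cumulative_return() returns the describe() statistics
# (positions 0-7) followed by the lower (8) and upper (9) 95% confidence interval values.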
ci_lower_thirty_cumulative_return = MC_summary_statistics[8] * total_stocks_bonds
ci_upper_thirty_cumulative_return = MC_summary_statistics[9] * total_stocks_bonds
# Print the result of your calculations
print(f"There is a 95% chance that an initial investment of ${total_stocks_bonds:,.2f} in the stocks and bonds portfolio,"
f" will be worth between ${ci_lower_thirty_cumulative_return: .2f} and ${ci_upper_thirty_cumulative_return: .2f} over the period of thirty years.")
```
### Forecast Cumulative Returns in 10 Years
The CTO of the credit union is impressed with your work on these planning tools but wonders if 30 years is a long time to wait until retirement. So, your next task is to adjust the retirement portfolio and run a new Monte Carlo simulation to find out if the changes will allow members to retire earlier.
For this new Monte Carlo simulation, do the following:
- Forecast the cumulative returns for 10 years from now. Because of the shortened investment horizon (30 years to 10 years), the portfolio needs to invest more heavily in the riskier asset—that is, stock—to help accumulate wealth for retirement.
- Adjust the weights of the retirement portfolio so that the composition for the Monte Carlo simulation consists of 20% bonds and 80% stocks.
- Run the simulation over 500 samples, and use the same data that the API call to Alpaca generated.
- Based on the new Monte Carlo simulation, answer the following questions in your Jupyter notebook:
- Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the new Monte Carlo simulation, what are the lower and upper bounds for the expected value of the portfolio (with the new weights) with a 95% confidence interval?
- Will weighting the portfolio more heavily toward stocks allow the credit union members to retire after only 10 years?
```
# Configure a Monte Carlo simulation to forecast 10 years cumulative returns
# The weights should be split 20% to AGG and 80% to SPY.
# Run 500 samples.
MCS_10 = MCSimulation(
portfolio_data= df_three_years,
weights= [0.2, 0.8],
num_simulation= 500,
num_trading_days= 252 * 10
)
# Review the simulation input data
MCS_10.portfolio_data.head()
# Run the Monte Carlo simulation to forecast 10 years cumulative returns
MCS_10.calc_cumulative_return()
# Visualize the 10-year Monte Carlo simulation by creating an
# overlay line plot
MCS_10.plot_simulation();
# Visualize the probability distribution of the 10-year Monte Carlo simulation
# by plotting a histogram
MCS_10.plot_distribution();
# Generate summary statistics from the 10-year Monte Carlo simulation results
# Save the results as a variable
MC_summary_statistics_10 = MCS_10.summarize_cumulative_return()
# Review the 10-year Monte Carlo summary statistics
print(MC_summary_statistics_10)
```
### Answer the following questions:
#### Question: Using the current value of only the stock and bond portion of the member's portfolio and the summary statistics that you generated from the new Monte Carlo simulation, what are the lower and upper bounds for the expected value of the portfolio (with the new weights) with a 95% confidence interval?
```
# Print the current balance of the stock and bond portion of the member's portfolio
print(f"Current Stock & Bond Portfolio value: ${total_stocks_bonds:,.2f}")
# Use the lower and upper `95%` confidence intervals to calculate the range of the possible outcomes for the current stock/bond portfolio
ci_lower_ten_cumulative_return = MC_summary_statistics_10[8] * total_stocks_bonds
ci_upper_ten_cumulative_return = MC_summary_statistics_10[9] * total_stocks_bonds
# Print the result of your calculations
print(f"There is a 95% chance that an initial investment of ${total_stocks_bonds:,.2f} in the stocks and bonds portfolio,"
f" will be worth between ${ci_lower_ten_cumulative_return: .2f} and ${ci_upper_ten_cumulative_return: .2f} over the period of ten years.")
```
#### Question: Will weighting the portfolio more heavily to stocks allow the credit union members to retire after only 10 years?
**Answer** It is difficult to say definitively with the limited information available. Based on the analysis above, the upper bound of the 10-year forecast does not exceed the lower bound of the 30-year forecast, so retiring after only 10 years is not advisable; retirement after 30 years remains the more realistic plan. Weighting the portfolio more heavily toward stocks does not appear to improve the outcome enough to change that decision. Nothing is guaranteed, of course, since these are only estimates, but the 30-year plan is the more likely path to retirement.
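To make the two forecasts easier to compare (a rough, hedged sketch that is not part of the required challenge steps), the 95% confidence-interval bounds can be annualized so the 10-year and 30-year horizons sit on the same footing. This assumes, as the prints above do, that the CI values are cumulative growth multiples of the initial investment; all variable names reuse the ones defined in the cells above.
```
# Sketch only: annualize the lower 95% CI bounds from both simulations.
annualized_lower_30 = (ci_lower_thirty_cumulative_return / total_stocks_bonds) ** (1 / 30) - 1
annualized_lower_10 = (ci_lower_ten_cumulative_return / total_stocks_bonds) ** (1 / 10) - 1
print(f"Implied lower-bound annual growth over 30 years (60/40): {annualized_lower_30:.2%}")
print(f"Implied lower-bound annual growth over 10 years (80/20): {annualized_lower_10:.2%}")
```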
<a href="https://colab.research.google.com/github/EricAlcarazdelPico/deep-learning-with-python-personal-workflow/blob/main/binary_classification_i.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Classifying movie reviews: The IMDB dataset
```
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
# load data
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
num_words=10000)
train_data
train_data.shape
max([max(sequence) for sequence in train_data])
train_labels[0]
train_labels.shape
```
### Preparing the data
**Encoding the integer sequences via multi-hot encoding**
```
def vectorize_sequences(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
for j in sequence:
results[i, j] = 1.
return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
x_train[0]
y_train = np.asarray(train_labels).astype("float32")
y_test = np.asarray(test_labels).astype("float32")
```
### Building your model
**Model definition**
```
model = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(1, activation="sigmoid")
])
```
**Compiling the model**
```
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
```
### Validating your approach
**Shuffling the data and setting aside a 10,000-sample validation set**
```
index_shuff = np.arange(len(x_train))
np.random.shuffle(index_shuff)
x_train = x_train[index_shuff]  # reorder rows with the shuffled indices
x_train = np.squeeze(x_train)   # no-op here; drops any stray singleton axis
y_train = y_train[index_shuff]
y_train = np.squeeze(y_train)
validation_size = 10000
x_train_shuff = x_train[validation_size:]
y_train_shuff = y_train[validation_size:]
x_valid_shuff = x_train[:validation_size]
y_valid_shuff = y_train[:validation_size]
```
**Training your model**
```
history = model.fit(x_train_shuff,
                    y_train_shuff,
epochs=20,
batch_size=512,
validation_data=(x_valid_shuff, y_valid_shuff))
```
**Plotting the training and validation loss**
```
history_dict = history.history
loss_values = history_dict["loss"]
val_loss_values = history_dict["val_loss"]
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, "bo", label="Training loss")
plt.plot(epochs, val_loss_values, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
```
**Plotting the training and validation accuracy**
```
plt.clf()
acc = history_dict["accuracy"]
val_acc = history_dict["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
```
**Retraining a model from scratch**
```
model = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(1, activation="sigmoid")
])
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.fit(x_train, y_train, epochs=3, batch_size=512)
results = model.evaluate(x_test, y_test)
results
```
### Generate predictions on test data
```
model.predict(x_test)
```
**Decoding reviews back to text**
```
word_index = imdb.get_word_index()
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
decoded_review = " ".join(
    [reverse_word_index.get(i - 3, "?") for i in train_data[0]])  # offset of 3 for reserved indices (padding, start, unknown)
decoded_review
train_labels[0]
```
### MDS - Multi-Dimensional Scaling
Non-linear dimensionality reduction methods assume that the latent variables are given by a non-linear mapping of the observed variables. Non-linear methods are very popular: they are much more powerful than linear ones because the relationship between latent and observed variables can be richer than a simple matrix multiplication. These mappings are functions of a large number of variables, and they require large datasets and more computing power. Examples include MDS (multidimensional scaling), ISOMAP (isometric mapping), and LLE (locally linear embedding).
MDS stands for multidimensional scaling. It is not a single method but a family of methods. MDS takes a dissimilarity matrix $D$, where $D_{ij}$ represents the dissimilarity between points $i$ and $j$, and produces a mapping in a lower dimension that preserves the dissimilarities as closely as possible. The dissimilarity matrix can be observed directly or calculated from the given dataset. MDS has been widely used and developed in the human sciences, such as sociology, anthropology and especially psychometrics.
Let's understand it better with an example. The table below shows correlations between rates of different types of crime in the US in 1970. Even for this small number of variables, it is hard to see the structure of the correlations directly.
$\begin{array}{r|lcr}
\text{crime} & \text{Murder} & \text{Rape} & \text{Robbery} & \text{Assault} & \text{Burglary} & \text{Larceny} & \text{Auto Theft}\\
\hline
\text{Murder} & 1.00 & 0.52 & 0.34 & 0.81 & 0.28 & 0.06 & 0.11\\
\text{Rape} & 0.52 & 1.00 & 0.55 & 0.70 & 0.68 & 0.60 & 0.44\\
\text{Robbery} & 0.34 & 0.55 & 1.00 & 0.56 & 0.62 & 0.44 & 0.62\\
\text{Assault} & 0.81 & 0.70 & 0.56 & 1.00 & 0.52 & 0.32 & 0.33\\
\text{Burglary} & 0.28 & 0.68 & 0.62 & 0.52 & 1.00 & 0.80 & 0.70\\
\text{Larceny} & 0.06 & 0.60 & 0.44 & 0.32 & 0.80 & 1.00 & 0.55\\
\text{Auto Theft} & 0.11 & 0.44 & 0.62 & 0.33 & 0.70 & 0.55 & 1.00\\
\end{array}
$
The plot below shows the 2-D mapping produced by MDS.

The relative position of points on the plot depends on the dissimilarity between them in the table of correlations, i.e. crime rates which are highly correlated are mapped close to each other, while crime rates which are not are mapped far apart. From the figure we can see that the horizontal dimension can be interpreted as "violent crime vs. property crime", whereas the vertical dimension can be interpreted as "street crime vs. hidden crime".
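A map like this can be produced directly with scikit-learn's `MDS` estimator (later in this notebook we implement the underlying SMACOF algorithm from scratch). The snippet below is only a sketch: it assumes scikit-learn is available, the variable names are local to this cell, and $1-\text{correlation}$ is used as one common (but not the only) way to turn correlations into dissimilarities.
```
import numpy as np
from sklearn.manifold import MDS

crimes = ["Murder", "Rape", "Robbery", "Assault", "Burglary", "Larceny", "Auto Theft"]
corr = np.array([
    [1.00, 0.52, 0.34, 0.81, 0.28, 0.06, 0.11],
    [0.52, 1.00, 0.55, 0.70, 0.68, 0.60, 0.44],
    [0.34, 0.55, 1.00, 0.56, 0.62, 0.44, 0.62],
    [0.81, 0.70, 0.56, 1.00, 0.52, 0.32, 0.33],
    [0.28, 0.68, 0.62, 0.52, 1.00, 0.80, 0.70],
    [0.06, 0.60, 0.44, 0.32, 0.80, 1.00, 0.55],
    [0.11, 0.44, 0.62, 0.33, 0.70, 0.55, 1.00],
])

# Turn correlations into dissimilarities (highly correlated => close together).
D_crime = 1.0 - corr

mds = MDS(n_components=2, dissimilarity="precomputed", random_state=0)
coords = mds.fit_transform(D_crime)
for name, (cx, cy) in zip(crimes, coords):
    print(f"{name:>10}: ({cx:+.2f}, {cy:+.2f})")
```
Plotting these coordinates with matplotlib reproduces a map of the kind shown above (up to rotation and reflection, which MDS does not fix).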
MDS can be divided into two categories:
**Metric MDS** - Metric MDS is used for quantitative data and tries to preserve the original dissimilarities. Given a dissimilarity matrix $D$, a monotone function $f$, and $p$ (the number of dimensions of the subspace), metric MDS tries to find an optimal configuration $X \subset R^p\; s.t.\;\;\;$ $f(D_{ij})\approx d_{ij}=\|x_i - x_j\|$. Another version of metric MDS is classical MDS (the original MDS formulation), which provides a closed-form solution: instead of iteratively approximating the dissimilarities in the lower dimension, it uses an eigenvalue decomposition.
**Non-Metric MDS** - Non-metric MDS is used for ordinal data. It tries to keep the order of the dissimilarities intact. For example, if $P_{ij}$ is the dissimilarity between points $i$ and $j$ and $P_{32} > P_{24}$, then non-metric MDS creates a mapping $s.t. \;\;d_{32} > d_{24}$.
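For reference, here is a minimal sketch of the classical (closed-form) variant mentioned above: it double-centres the squared distance matrix and keeps the top eigenvectors. The function name is only illustrative.
```
import numpy as np

def classical_mds(D, p=2):
    """Classical (Torgerson) MDS: closed-form embedding via eigendecomposition."""
    n = D.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n          # centering matrix
    B = -0.5 * J.dot(D**2).dot(J)                # double-centred squared distances
    eigvals, eigvecs = np.linalg.eigh(B)         # eigenvalues in ascending order
    top = np.argsort(eigvals)[::-1][:p]          # indices of the p largest eigenvalues
    scale = np.sqrt(np.maximum(eigvals[top], 0.0))  # clamp tiny negative eigenvalues
    return eigvecs[:, top] * scale               # n x p coordinate matrix
```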
We will implement metric MDS using the SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm. Before diving into the implementation, we need to learn a bit about the MM (Majorization-Minimization) algorithm for optimization.
**MM for finding an optimum of a function**
The MM algorithm is an iterative method for finding an optimum of a complicated function.
Suppose we have a function $f(x)$ for which we need to find a minimum. Instead of optimizing $f(x)$ directly, MM works with an approximating function $g(x,x_m)$. If the problem is to find a minimum of $f(x)$, then $g(x,x_m)$ is called a majorizing function (otherwise a minorizing function), and $x_m$ is called the support point.
If $g(x,x_m)$ is a majorizing function of $f(x)$, then it has to satisfy the following conditions:
1. Optimizing $g(x,x_m)$ should be easier than $f(x)$.
2. For any $x$, $\;f(x) \; \le \;g(x,x_m)$
3. $f(x_m) = g(x_m,x_m)$
Steps of MM algorithm
1. choose a random support point $x_m$
2. find $x_{min}$ = $\arg\min_x {g(x,x_m)}$
3. if $f(x_m) - f(x_{min}) < \epsilon$, where $\epsilon$ is a very small number, stop; else go to step 4
4. set $x_m = x_{min}$ and go to step 2
We will understand these steps better with the help of the widget below.
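In the widget, the green curve is $f(x) = |x-1| + |x-3| + |x-4| + |x-7| + |x-8|$, and the `g(x)` in the code below is built from the standard quadratic majorizer of the absolute value at the support point $x_m$ (it follows from $(|x-a| - |x_m-a|)^2 \ge 0$):
$\hspace{7em} |x - a| \;\le\; \frac{1}{2}\left(\frac{(x-a)^2}{|x_m - a|} + |x_m - a|\right)$, with equality at $x = x_m$.
Summing this bound over the five terms gives a smooth quadratic $g(x, x_m) \ge f(x)$ that touches $f$ at $x_m$ and is easy to minimize.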
```
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from scipy import optimize as opt
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from matplotlib.pyplot import cm
x = np.array(np.arange(100))/10
y = np.abs(x-1)+np.abs(x-4)+np.abs(x-3)+np.abs(x-7)+np.abs(x-8)
pivot = 9
g_vals=[]
ax = []
ay = []
ax_ = 0
ay_ = 0
num_iter = 0
minima = 0
fun_min = 0
g_x=0
plt.rcParams["figure.figsize"] = [12,6]
colors = cm.Reds(range(500))
def g(x):
g1_ = (((x-1)**2)/(np.abs(pivot-1)) + ((x-4)**2)/(np.abs(pivot-4)) +((x-3)**2)/(np.abs(pivot-3)) +
((x-7)**2)/(np.abs(pivot-7)) + ((x-8)**2)/(np.abs(pivot-8)) )
g2_ = np.abs(pivot-1) + np.abs(pivot-4) + np.abs(pivot-3) + np.abs(pivot-7) + np.abs(pivot-8)
g_ = 0.5*(g1_ + g2_)
return (g_)
def calc(j):
global pivot, ax, ay, g_vals,ax_, minima, fun_min, ay_,g_x
for i in range(j):
min_ = opt.minimize(g,x0=0)
minima = min_.x[0]
fun_min = min_.fun
g_x = g(x)
ay_ = g(pivot)
g_vals = g_vals + [g_x]
ax_ = pivot
pivot = minima
ax = ax + [ax_]
ay = ay + [ay_]
def plotter():
plt.plot(x,y,"g")
plt.ylim(10,25)
z = int(500/((len(g_vals)+1)))
cmap = colors[0::z]
l_width = 1
for gx,c,x_pivot,y_pivot in zip(g_vals,cmap[0:-1], ax, ay):
plt.plot(x,gx,c=c, lw =l_width, ls="dashed")
plt.plot(x_pivot, y_pivot,"ro")
plt.plot([x_pivot,x_pivot],[y_pivot,0])
plt.plot(x, g_x, c=cmap[-1], lw=l_width+1)
txt1 = "pivot=("+str("{0:.2f}".format(ax_))+","+str("{0:.2f}".format(ay_))+")"
plt.annotate(txt1, xy= (ax_, ay_), xycoords="data", xytext=[ax_-2, ay_+2],
arrowprops=dict(arrowstyle="->",connectionstyle="arc3"),
bbox=dict(boxstyle="round", fc="w"), family="monospace",
style="normal", size="xx-large")
txt1 = "minima=("+str("{0:.2f}".format(minima))+","+str("{0:.2f}".format(fun_min))+")"
plt.annotate(txt1, xy= (minima, fun_min), xycoords="data", xytext=[minima-5, fun_min+2],
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),bbox=dict(boxstyle="round", fc="w"), family="monospace",
style="normal",size="xx-large")
plt.plot(ax_, ax_,"ro")
plt.plot(minima, fun_min,"go")
plt.plot([ax_,ax_],[ay_,0])
plt.plot([minima, minima],[fun_min,0])
plt.text(0,24,"Iterations = "+str(num_iter+1), horizontalalignment="left", verticalalignment="top",
size="xx-large", family="monospace", bbox=dict(boxstyle="round", fc="w"))
def plot(t):
    global num_iter
if t>num_iter:
calc(t-num_iter)
plotter()
num_iter=t
elif t<num_iter:
reset()
calc(t)
plotter()
num_iter = t
def reset():
global pivot, g_vals, ax, ay, ax_, ay_, minima, fun_min, num_iter
pivot = 9
g_vals=[]
ax = []
ay = []
ax_ = 0
ay_ = 0
g_x = 0
num_iter = 0
minima = 0
fun_min = 0
interact(plot,t = widgets.IntSlider(min=1, max=12, value=1, step=1))
```
The plot in green is the function for which we need to find the minimum. The number of iterations **t** is controlled by the widget at the top. Our initial support point is $x_m = 9.00$, and the minimum of $g(x,9)$ is at $x_{min} = 6.49$. If we move the knob of the widget to 2, we see that $x_{m}$ is now 6.49 and we get a new $x_{min}=6.20$ based on $g(x,6.49)$. If we increase **t** by 1 again, $x_{m}$ becomes 6.20 and $x_{min}$ moves to 5.84. As we move the knob further to the right, the minimum moves towards the minimum of the green plot.
So that's how the MM algorithm works. The most important part of the MM algorithm is finding a good approximating function.
Now let's move to the SMACOF algorithm for metric MDS. As mentioned earlier, metric MDS tries to approximate the dissimilarity matrix by minimizing the stress function, which is given by
$\hspace{7em}\sigma(X) = \Sigma_{i<j}\; w_{ij}(\delta_{ij} - d_{ij}(X))^2$ , where
$\hspace{7em} w_{ij}$ is the weight assigned to the dissimilarity between $i$ and $j$
$\hspace{7em} \delta_{ij}$ is an element of the given dissimilarity matrix
$\hspace{7em} d_{ij}(X)$ is the distance computed from the configuration $X$ that we need to find
We are not going to delve into the derivation of the majorizing function for the stress. If you wish to follow it, please consult the excellent [book](http://www.springer.com/in/book/9780387251509) (topic: Majorizing stress, page 187).
The majorizing function for the stress function is given by $\tau(X,Z)$
$\hspace{7em} \tau(X,Z) = \eta_{\delta}^2 + tr \;X^TVX - 2tr\; X^TB(Z)Z$ , where
$\hspace{9em} \eta_{\delta}^2 = \Sigma_{i<j} W_{ij}\delta_{ij}^2$
$\hspace{9em} B(Z)$ contains elements
$\hspace{11em} b_{ij} = -w_{ij}\,\delta_{ij}/d_{ij} \;\; if \;d_{ij} > \;\text{0.0} \; else\; \text{0.0}$
$\hspace{11em} b_{ii} = - \Sigma_{j,j\ne i}\; b_{ij}$
$\hspace{9em} V = \Sigma_{i<j} w_{ij}*A_{ij}$
$\hspace{11em} A_{ij}$ is a sparse matrix of the same size as $W$, $s.t.\; A_{ii}=A_{jj}=1,\; A_{ij}=A_{ji}=-1$
To implement the SMACOF algorithm, we need the derivative of $\tau(X,Z)$ w.r.t. $X$:
$\hspace{7em} \nabla\;\tau(X,Z) = 2 VX - 2 B(Z)Z \; = \; 0$ which gives
$\hspace{7em}VX = B(Z)Z$, i.e.
$\hspace{7em}X = V^+B(Z)Z\;$ where $V^+$ is the pseudo-inverse of $V$
Now, we have everything for the steps of SMACOF algorithm.
1. choose a random point $X_m$
2. Find the minimum of $\tau(X, X_m)$, which is given by $X_{min} = V^+B(X_m)X_m$
3. if $\sigma(X_{m}) - \sigma(X_{min}) \lt \epsilon$ break
else set $X_m = X_{min}$ and go to step 2
If we weight all dissimilarities equally, i.e. $w_{ij} = 1$, then $V^+ = n^{-1}J$ where $J$ is the centering matrix, and the update equation simplifies to
$X_{min} = n^{-1}B(Z)Z$.
Let's code the above steps of SMACOF.
```
#load the dataset
from sklearn import datasets
iris_d = datasets.load_iris()
iris = iris_d.data
# compute the variables which require calculation only once
from sklearn.metrics import pairwise
# distance metric calculation function
dist_mat = lambda x:pairwise.pairwise_distances(x, metric = "euclidean")
# we will denote the original distance metric with D
D = dist_mat(iris)
# stress calculation function
stress = lambda d: ((D-d)**2).sum()
def create_B(d):
    d[d==0.0] = np.inf
    B = -D/d                                      # off-diagonal entries b_ij = -delta_ij / d_ij
    np.fill_diagonal(B, 0.0)
    B[range(D.shape[0]),range(D.shape[0])] = -B.sum(axis=0).T   # diagonal b_ii = -sum_{j != i} b_ij
    return(B)
# steps of SMACOF
np.random.seed(101)
max_iter = 1000
# set the lower dimension value to 2
p = 2
n,m = iris.shape
#choose a random pivot point
x_m = np.random.rand(n, p)
# denote the subspace distance matrix as d
d = dist_mat(x_m)
stress_old = stress(d)
tol = 1e-4
for i in range(max_iter):
print(i)
x_min = create_B(d.copy()).dot(x_m)/n
d = dist_mat(x_min)
stress_new = stress( d)
if stress_old-stress_new < tol:
break
else:
x_m = x_min
stress_old = stress_new
plt.plot(x_min[:,0],x_min[:,1],"o")
```
The only hyperparameter required is the number of dimensions of the lower-dimensional space. If the number of dimensions is too low, the solution gets distorted by over-compression. If it is too high, the solution will over-fit and start fitting random noise.
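One informal way to see this trade-off is to rerun the SMACOF loop above for a few values of $p$ and compare the final stress: low-dimensional embeddings are forced to accept more distortion, so the stress stays higher. This sketch simply reuses the helpers defined in the previous cell (the function name `run_smacof` is just for this check):
```
# Informal check: final stress for several embedding dimensions p,
# reusing dist_mat, stress and create_B defined in the cell above.
def run_smacof(p, max_iter=1000, tol=1e-4, seed=101):
    rng = np.random.RandomState(seed)
    x_m = rng.rand(D.shape[0], p)          # random starting configuration
    d = dist_mat(x_m)
    stress_old = stress(d)
    for _ in range(max_iter):
        x_min = create_B(d.copy()).dot(x_m) / D.shape[0]
        d = dist_mat(x_min)
        stress_new = stress(d)
        if stress_old - stress_new < tol:
            break
        x_m, stress_old = x_min, stress_new
    return stress_new

for p_dim in [1, 2, 3, 4]:
    print(p_dim, round(run_smacof(p_dim), 2))
```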
### MDS in pyspark
Let's convert the python code to pyspark.
```
from sklearn import datasets
import math as ma
import numpy as np
from pyspark.sql import types as t
from pyspark.sql import functions as f
iris_ = datasets.load_iris()
iris = iris_.data
# repartitioning the dataframe by the id column will speed up the join operation
df = spark.createDataFrame(sc.parallelize(iris.tolist()).zipWithIndex()).toDF("features", "id").repartition("id")
df.cache()
euclidean = lambda x,y:ma.sqrt(np.sum((np.array(x)-np.array(y))**2))
data_bc = sc.broadcast(df.sort("id").select("features").rdd.collect())
# create the distance metric
def pairwise_metric1(y):
dist = []
for x in data_bc.value:
dist += [ma.sqrt(np.sum((np.array(x)-np.array(y))**2))]
return(dist)
udf_dist1 = f.udf(pairwise_metric1, t.ArrayType(t.DoubleType()))
df = df.withColumn("D", udf_dist1("features"))
n,p = iris.shape
dim = 2
X = np.random.rand(n,dim)
# randomly initialize a solution for the pivot point.
dfrand = spark.createDataFrame(sc.parallelize(X.tolist()).zipWithIndex()).toDF("X", "id2").repartition("id2")
df = df.join(dfrand, df.id==dfrand.id2, "inner").drop("id2")
def pairwise_metric2(y):
dist = []
for x in X_bc.value:
dist += [ma.sqrt(np.sum((np.array(x)-np.array(y))**2))]
return(dist)
# create the matrix B
def B(id,x,y):
y,x = np.array(y), np.array(x)
y[y==0.0] = np.inf
z = -x/y
z[id] = -(np.sum(z)-z[id])
return(z.tolist())
# function for matrix multiplication using outer multiplication
def df_mult(df, col1, col2, n1, n2, matrix=True):
udf_mult = f.udf(lambda x,y:np.outer(np.array(x), np.array(y)).flatten().tolist(), t.ArrayType(t.DoubleType()))
df = df.withColumn("mult", udf_mult(col1, col2))
df = df.agg(f.array([f.sum(f.col("mult")[i]) for i in range(n1*n2)])).toDF("mult")
if not matrix:
return(df)
st = t.ArrayType(t.StructType([t.StructField("id",t.LongType()),t.StructField("row", t.ArrayType(t.DoubleType()))]))
udf_arange = f.udf(lambda x:[(i,j.tolist()) for i,j in enumerate(np.array(x).reshape(n1,n2)/n1)], st)
df = df.withColumn("mult", udf_arange("mult")).select(f.explode("mult").alias("mult"))
df = df.select(f.col("mult.id").alias("id2"),f.col("mult.row").alias("X_min")).repartition("id2")
return(df)
udf_B = f.udf(B, t.ArrayType(t.DoubleType()))
udf_sigma = f.udf(lambda x,y: float(np.sum((np.array(x)-np.array(y))**2)), t.DoubleType())
sigma_old = np.inf
tol = 1e-4
max_iter = 1000
for i in range(max_iter):
X_bc = sc.broadcast(df.sort("id").select("X").rdd.collect())
def pairwise_metric2(y):
dist = []
for x in X_bc.value:
dist += [ma.sqrt(np.sum((np.array(x)-np.array(y))**2))]
return(dist)
udf_dist2 = f.udf(pairwise_metric2, t.ArrayType(t.DoubleType()))
df = df.withColumn("di", udf_dist2("X"))
df = df.withColumn("sigma", udf_sigma("D","di"))
sigma_new = df.agg({"sigma":"sum"}).collect()[0][0]
print(sigma_old, sigma_new)
sigma_old = sigma_new
df = df.withColumn("B", udf_B("id","D","di")).drop("di")
X_min = df_mult(df, "B", "X", n, dim)
df = df.join(X_min, df.id==X_min.id2).select("id", "D", f.col("X_min").alias("X"))
# cache action will prevent recreation of dataframe from base
df.cache()
```
*(Figure not included: visualization of the **MNIST** dataset with MDS.)*
### Advantages and drawbacks
The metric MDS algorithm works better than classical MDS on non-linear manifolds, but it requires $O(N^2)$ operations to process the distance matrix, and embedding new data points is hard. Iterative algorithms like this also require a lot of computing power in Spark.
# Kernel machines
In this notebook we will use simple two-dimensional data sets to illustrate the behavior of the support vector machine and the Perceptron, when used with quadratic and RBF kernels.
### Import
```
%matplotlib inline
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.svm import SVC
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
```
### Data
- two-dimensional data files `data1.txt` ~ `data5.txt` contain one data coordinate/point per line, along with a label (either -1 or 1)
  - e.g. `3 8 -1` (meaning that point `x=(3,8)` has label `y=-1`)
```
data_files = ['dummy0', '../../_data/data1.txt', '../../_data/data2.txt', '../../_data/data3.txt',
'../../_data/data4.txt', '../../_data/data5.txt']
```
## Kernel SVM
The helper `learn_and_display` below:
1. loads one of these data sets
2. fits an `sklearn.SVC` classifier
3. plots the data, the support vectors and the decision boundary
Parameters:
* `datafile` is one of `'data1.txt'` ~ `'data5.txt'` (or another file in the same format)
* `kernel_type` is either `'quadratic'` or `'rbf'`
* `C_value` is the setting of the soft-margin parameter `C` (default: 1.0)
* `s_value` (for the RBF kernel) is the scaling parameter `s`, passed to `SVC` as `gamma = 1/s**2` (default: 1.0)
Hyperparameter __`C`__ is the cost of misclassification:
- reducing C lowers the misclassification cost, so expect more misclassifications
- it increases the boundary margin
- it increases bias (more misclassifications)
- it lowers variance and, as a result, overfitting
- the default value for parameter `C` is 1.0
For the RBF kernel - hyperparameter __Sigma__ (std. deviation):
- sigma acts as a scale for the distance between x and x'
- when the distance between x and x' is much larger than sigma, the kernel value tends to zero
- if sigma is very small, only points x within a short distance of the query point affect the prediction
As for the variance and bias trade-off:
- smaller sigma => less bias and more variance
- larger sigma => less variance and more bias => smoother boundary and less overfitting
### Helper for grid pairs
```
def xy_grid(x, y, ax_pad=0, density=0.1):
"""returns grid of (xx, yy) pairs, w.r.t. density and padding"""
xmin, xmax = min(x)-ax_pad, max(x)+ax_pad
ymin, ymax = min(y)-ax_pad, max(y)+ax_pad
xx, yy = np.meshgrid(np.arange(xmin, xmax+density, density), np.arange(ymin, ymax+density, density))
return {'array': np.c_[xx.ravel(), yy.ravel()],
'xx': xx, 'yy': yy,
'xmin': xmin, 'xmax': xmax,
'ymin': ymin, 'ymax': ymax}
def learn_and_display(datafile, classifier):
data = np.loadtxt(datafile)
n, d = data.shape
# Create training set x and labels y
x = data[:, 0:2]
y = data[:, 2]
# Train a support vector machine
clf = classifier.fit(x, y)
# Support vectors
sv = np.zeros(n, dtype=bool)
sv[clf.support_] = True
notsv = np.logical_not(sv)
# Determine the x1- and x2- limits of the plot
grid = xy_grid(x[:, 0], x[:, 1], ax_pad=1, density=0.05)
grid_xy = grid['array']
plt.xlim(grid['xmin'], grid['xmax'])
plt.ylim(grid['ymin'], grid['ymax'])
# Plot the data points, enlarging the support vectors
plt.plot(x[(y==1)*notsv, 0], x[(y==1)*notsv, 1], 'ro')
plt.plot(x[(y==1)*sv, 0], x[(y==1)*sv, 1], 'ro', markersize=10)
plt.plot(x[(y==-1)*notsv, 0], x[(y==-1)*notsv, 1], 'k^')
plt.plot(x[(y==-1)*sv, 0], x[(y==-1)*sv, 1], 'k^', markersize=10)
Z = clf.decision_function(grid_xy)
# Show boundary and margin using a color plot
Z = Z.reshape(grid['xx'].shape)
plt.pcolormesh(grid['xx'], grid['yy'], Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, alpha=.8)
plt.contourf(grid['xx'], grid['yy'], Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, alpha=.3)
plt.show()
```
### SVM with the RBF kernel: hyperparameter sweeps
#### Gamma - boundary width
```
# clf = SVC(kernel=kernel_type, C=1.0, gamma=1/(sigma**2))
kernel = 'rbf' # distance algo / metric
C = 20.0 # misclassification cost; C higher => less tolerance => boundaries less smooth
sigma = 20      # RBF width; lower sigma => higher gamma => narrower kernel => a more flexible (wiggly) boundary
for data in data_files[3:4]:
for sigma in [10e-2, 10e-1, 10e0, 10e1, 10e2]:
print(C, sigma)
learn_and_display(data, SVC(kernel='rbf', C=C, gamma=1.0/sigma**2))
```
#### C - misclassification tolerance
```
# clf = SVC(kernel=kernel_type, C=1.0, gamma=1/(sigma**2))
kernel = 'rbf'
C = 20.0
sigma = 10
for data in data_files[3:4]:
for C in [10e-2, 10e-1, 10e0, 10e1, 10e2]:
print(C, sigma)
learn_and_display(data, SVC(kernel='rbf', C=C, gamma=1.0/sigma**2))
```
Try `data2.txt` through `data5.txt` as well, and try changing the value of `C` to see how that affects the boundary and margin.
### SVM with the RBF kernel
```
# clf = SVC(kernel=kernel_type, C=1.0, gamma=1/(sigma**2))
kernel = 'rbf' # distance algo / metric
C = 100.0       # soft-margin penalty (misclassification cost)
sigma = 10.0    # RBF kernel width
for data in data_files[1:]:
learn_and_display(data, SVC(kernel='rbf', C=C, gamma=1/sigma**2))
```
## Kernel Perceptron
- the Perceptron algorithm does not always converge
### Kernels
```
def rbf(x, y, degree=None, sigma=5.0, denominator=1):
# Note: denominator factor: 1(from lecture), 2(from wiki)
# 1. vector-vector distance
if x.shape == y.shape:
return np.exp(-np.linalg.norm(x-y)**2 / (denominator*(sigma**2)))
# 2. matrix-vector distances
return np.array([np.exp(-np.linalg.norm(x[i] - y)**2 / (denominator*(sigma**2)))
for i in range(x.shape[0])])
def poly(x, y, degree=2, sigma=None):
return (1 + x.dot(y))**degree
def train_kernel(x, y, kernel, degree, sigma, n_iters=1000):
n, d = x.shape
# nxn kernel similarity matrix
K = np.zeros((n, n))
for (i,j), _ in np.ndenumerate(K):
K[i,j] = kernel(x[i], x[j], degree, sigma)
# Random iterations
convergence = 0
alpha, b = np.zeros((n,)), 0
np.random.seed(0)
for itr in range(n_iters):
for idx in np.random.permutation(n):
            # Update alpha and b when the point is misclassified (prediction and label disagree)
if y[idx] * np.sum(alpha * y * K[:,idx] + b) <= 0:
alpha[idx] += 1
b = b + y[idx]
convergence = itr + 1
print("kernel:{}, degree:{}, sigma:{}, {}/{} iterations for convergence".format(
kernel.__name__, degree, sigma, convergence, n_iters))
return alpha, b, convergence < n_iters
```
### Learn and plot kernel perceptrons
```
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import interact, interactive, fixed, interact_manual, IntSlider
@interact_manual(datafile=(1, 5),
kernel={'rbf':rbf, 'poly':poly},
degree=(1, 4, 1),
sigma=widgets.FloatLogSlider(value=1, base=10, min=-3, max=3, step=1, description='sigma'),
iterations=(100, 1000, 100),
density=widgets.FloatSlider(value=.1, min=0.05, max=0.25, step=.05, description='plot density'))
def interactive_perceptron(datafile, kernel, degree, sigma, iterations, density):
# Create training set x and labels y
data = np.loadtxt(data_files[datafile])
x = data[:, 0:2]
y = data[:, 2]
# Train a perceptron
alpha, b, converged = train_kernel(x, y, kernel, degree, sigma, iterations)
# Plot if converged else stop
if not converged:
print('NOT CONVERGED')
return
# Determine the x1- and x2- limits of the plot
ax_pad = 1.5
x1min, x1max = min(x[:, 0])-ax_pad, max(x[:, 0])+ax_pad
x2min, x2max = min(x[:, 1])-ax_pad, max(x[:, 1])+ax_pad
plt.xlim(x1min, x1max)
plt.ylim(x2min, x2max)
# Plot the data points, enlarging those that are support vectors
plt.plot(x[(y==1), 0], x[(y==1), 1], 'ro')
plt.plot(x[(y==-1), 0], x[(y==-1), 1], 'k^')
# Construct a grid of points and evaluate classifier at each grid points
xx1, xx2 = np.meshgrid(np.arange(x1min, x1max, density), np.arange(x2min, x2max, density))
grid = np.c_[xx1.ravel(), xx2.ravel()]
# Predict (Z)
Z = np.sign([sum(y * alpha * kernel(x, pt, degree, sigma) + b) for pt in grid])
# Show boundary and margin using a color plot
Z = Z.reshape(xx1.shape)
# Plot
plt.contourf(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, alpha=.8)
plt.show()
def learn_and_display_Perceptron(datafile, **kwargs):
kernel, degree, sigma, n_iters = kwargs.values()
# Create training set x and labels y
data = np.loadtxt(datafile)
x = data[:, 0:2]
y = data[:, 2]
# Train a perceptron
alpha, b, converged = train_kernel(x, y, kernel, degree, sigma, n_iters)
# Plot if converged else stop
if not converged:
print('NOT CONVERGED')
return
# Determine the x1- and x2- limits of the plot
ax_pad = 1.5
x1min, x1max = min(x[:, 0])-ax_pad, max(x[:, 0])+ax_pad
x2min, x2max = min(x[:, 1])-ax_pad, max(x[:, 1])+ax_pad
plt.xlim(x1min, x1max)
plt.ylim(x2min, x2max)
# Plot the data points, enlarging those that are support vectors
plt.plot(x[(y==1), 0], x[(y==1), 1], 'ro')
plt.plot(x[(y==-1), 0], x[(y==-1), 1], 'k^')
# Construct a grid of points and evaluate classifier at each grid points
density = 0.1 # 'dpi'
xx1, xx2 = np.meshgrid(np.arange(x1min, x1max+density, density), np.arange(x2min, x2max+density, density))
grid = np.c_[xx1.ravel(), xx2.ravel()]
# Predict (Z)
Z = np.sign([sum(y * alpha * kernel(x, pt, degree, sigma) + b) for pt in grid])
# Show boundary and margin using a color plot
Z = Z.reshape(xx1.shape)
# Alternative plot methods
# plt.pcolormesh(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2)
plt.contourf(xx1, xx2, Z, cmap=plt.cm.PRGn, vmin=-2, vmax=2, alpha=.8)
# plt.imshow(Z, aspect='auto', origin='lower', interpolation='none') # TODO
plt.show()
```
#### Loop through hyperparameters
```
for data in data_files[1:]:
for degree, sigma in zip([1, 2, 3, 4], [0.2, 1, 5, 25]):
for kernel in [rbf, poly]:
print(data)
            learn_and_display_Perceptron(data, kernel=kernel, degree=degree, sigma=sigma, n_iters=1000)
```
### Test distance metrics
#### Vector length and distances
```
np.linalg.norm(np.array([2,2,2]))
np.linalg.norm(np.array([1,1,1]))
np.linalg.norm(1)
1 - np.array([2,2,2])
np.linalg.norm(1 - np.array([2,2,2])) # broadcasting
```
#### Similarity/distance from vector to matrix
- value 1 means vectors are similar
sigma is:
- the standard deviation, as usually defined in a Gaussian distribution
- the radius of influence around the support vectors
- [interactive demo](https://cs.stanford.edu/people/karpathy/svmjs/demo/)
```
def rbf_(x, y, sigma=3, denominator=2):
""""""
if x.shape == y.shape:
return np.exp(-np.linalg.norm(x-y)**2 / (denominator*(sigma**2)))
return np.array([np.exp(-np.linalg.norm(x[i] - y)**2 / (denominator*(sigma**2))).round(3)
for i in range(x.shape[0])])
data = np.loadtxt(data_files[5])
x, y = data[:, :2], data[:, 2]
for sigma in [0.3, 0.5, 1, 2, 4]:
    sim = rbf_(x[:10], x[2], sigma)
    print('sigma: {:<3} vector similarity: {}\t mean similarity: {:.3f}'.format(sigma, sim, sim.mean(0)))
```
```
import cppimport.import_hook
import numpy as np
#cppimport.force_rebuild(True)
```
`cppimport` provides an easy way to compile and import C++ code. When you execute `import _atlas4py`, it looks for a file `_atlas4py.cpp`, builds it, and imports the resulting module (check the include paths in that file!).
```
import _atlas4py
#grid = _atlas4py.StructuredGrid("L32x32")
#grid = _atlas4py.StructuredGrid(x_spacings=[_atlas4py.LinearSpacing(-1, 1, 21)]*15 + [_atlas4py.LinearSpacing(-1, 1, 15)]*6,
# y_spacing=_atlas4py.LinearSpacing(-1, 1, 21))
grid = _atlas4py.StructuredGrid(x_spacing=_atlas4py.LinearSpacing(-1, 1, 20),
y_spacing=_atlas4py.LinearSpacing(-1, 1, 21))
print(grid)
mesh = _atlas4py.StructuredMeshGenerator().generate(grid)
_atlas4py.build_edges(mesh)
_atlas4py.build_node_to_edge_connectivity(mesh)
fs = _atlas4py.functionspace.CellColumns(mesh)
print(fs)
out_f = fs.create_field(name="my_field", levels=1, dtype=np.float64)
in_f = fs.create_field(name="my_field", levels=1, dtype=np.float64)
```
You can create views on the atlas fields. Don't forget to pass `copy=False`! The views can be accessed like normal numpy arrays
```
out_view = np.array(out_f, copy=False)
in_view = np.array(in_f, copy=False)
out_view[:] = in_view[:] = np.zeros_like(in_view)
lonlat_view = np.array(mesh.nodes.lonlat, copy=False)
lonlat0 = lonlat_view[0]
lonlat1 = lonlat_view[-1]
assert in_view.shape[1] == 1
for cPos in range(mesh.cells.size):
nPos = mesh.cells.node_connectivity[cPos, 0]
center_lon = (lonlat_view[nPos, 0] - lonlat0[0] + (lonlat0[0] - lonlat1[0]) / 2)
center_lat = (lonlat_view[nPos, 1] - lonlat0[1] + (lonlat0[1] - lonlat1[1]) / 2)
in_view[cPos, 0] = 1 if abs(center_lon) < .5 and abs(center_lat) < .5 else 0
out_view[cPos, 0] = 0
```
Now we also compile and import `computation.cpp`. Note that there is an absolute path to `gmsh` in here.
```
!rm out.msh
import computation
with _atlas4py.Gmsh("out.msh") as out:
out.write(mesh)
for i in range(100):
in_f.metadata["step"] = i
out.write(in_f)
computation.run_computation(mesh, 1, in_f, out_f)
in_f, out_f = out_f, in_f
!~/packages/gmsh/gmsh-4.4.1-Linux64/bin/gmsh out.msh
```
# Learning about atlas...
```
grid = _atlas4py.StructuredGrid(x_spacing=_atlas4py.LinearSpacing(-1, 1, 5),
y_spacing=_atlas4py.LinearSpacing(-1, 1, 5))
mesh = _atlas4py.StructuredMeshGenerator().generate(grid)
_atlas4py.build_edges(mesh)
_atlas4py.build_node_to_edge_connectivity(mesh)
fs = _atlas4py.functionspace.CellColumns(mesh, halo=2)
mesh.cells.size
mesh.edges.size
mesh.nodes.size
lonlat_view = np.array(mesh.nodes.lonlat, copy=False)
lonlat_view
import pprint
print("number of blocks: ", mesh.edges.node_connectivity.blocks)
for block in range(mesh.edges.node_connectivity.blocks):
print("\nblock: ", block)
b = mesh.edges.node_connectivity.block(block)
print("{} x {}".format(b.rows, b.cols))
pprint.pprint([[b[i, j] for j in range(b.cols)] for i in range(b.rows)])
print("--- ")
```
The number of blocks is 2. If we build pole edges, these edges go into the second block.
```
print("number of blocks: ", mesh.cells.node_connectivity.blocks)
for block in (0, 1):
print("\nblock: ", block)
b = mesh.cells.node_connectivity.block(block)
print("{} x {}".format(b.rows, b.cols))
pprint.pprint([[b[i, j] for j in range(b.cols)] for i in range(b.rows)])
print("--- ")
print("number of blocks: ", mesh.cells.edge_connectivity.blocks)
for block in range(mesh.cells.edge_connectivity.blocks):
print("\nblock: ", block)
b = mesh.cells.edge_connectivity.block(block)
print("{} x {}".format(b.rows, b.cols))
pprint.pprint([[b[i, j] for j in range(b.cols)] for i in range(b.rows)])
print("--- ")
c = mesh.nodes.edge_connectivity
pprint.pprint([[c[i, j] for j in range(c.cols(i))] for i in range(c.rows)])
```
Test whether you can write different types into the metadata and get back the correct type.
```
print(in_f.metadata)
in_f.metadata["bool"] = True
print(in_f.metadata["bool"])
in_f.metadata["int"] = 3
print(in_f.metadata["int"])
in_f.metadata["float"] = 3.12
print(in_f.metadata["float"])
in_f.metadata["str"] = "x"
print(in_f.metadata["str"])
print(in_f.metadata)
```
## Data Processing
We have used a corpus of NIPS papers in this tutorial. This corpus contains 1,740 documents. If you're following this tutorial just to learn about LDA, I encourage you to pick a corpus on a subject that you are familiar with.
See https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html for details
```
import io
import os.path
import re
import tarfile
import smart_open
def extract_documents(url='https://cs.nyu.edu/~roweis/data/nips12raw_str602.tgz'):
fname = url.split('/')[-1]
# Download the file to local storage first.
# We can't read it on the fly because of
# https://github.com/RaRe-Technologies/smart_open/issues/331
if not os.path.isfile(fname):
with smart_open.open(url, "rb") as fin:
with smart_open.open(fname, 'wb') as fout:
while True:
buf = fin.read(io.DEFAULT_BUFFER_SIZE)
if not buf:
break
fout.write(buf)
with tarfile.open(fname, mode='r:gz') as tar:
# Ignore directory entries, as well as files like README, etc.
files = [
m for m in tar.getmembers()
if m.isfile() and re.search(r'nipstxt/nips\d+/\d+\.txt', m.name)
]
for member in sorted(files, key=lambda x: x.name):
member_bytes = tar.extractfile(member).read()
yield member_bytes.decode('utf-8', errors='replace')
docs = list(extract_documents())
print(len(docs))
print(docs[0][:500])
# Tokenize the documents.
from nltk.tokenize import RegexpTokenizer
# Split the documents into tokens.
tokenizer = RegexpTokenizer(r'\w+')
for idx in range(len(docs)):
docs[idx] = docs[idx].lower() # Convert to lowercase.
docs[idx] = tokenizer.tokenize(docs[idx]) # Split into words.
# Remove numbers, but not words that contain numbers.
docs = [[token for token in doc if not token.isnumeric()] for doc in docs]
# Remove words that are only one character.
docs = [[token for token in doc if len(token) > 1] for doc in docs]
# Lemmatize the documents.
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]
# Compute bigrams.
from gensim.models import Phrases
# Add bigrams and trigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(docs, min_count=20)
for idx in range(len(docs)):
for token in bigram[docs[idx]]:
if '_' in token:
# Token is a bigram, add to document.
docs[idx].append(token)
# Remove rare and common tokens.
from gensim.corpora import Dictionary
# Create a dictionary representation of the documents.
dictionary = Dictionary(docs)
# Filter out words that occur less than 20 documents, or more than 20% of the documents.
dictionary.filter_extremes(no_below=20, no_above=0.2)
# Bag-of-words representation of the documents.
corpus = [dictionary.doc2bow(doc) for doc in docs]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
from gensim import corpora
corpora.MmCorpus.serialize('./nips.mm', corpus)
dictionary.save_as_text('./nips_wordids.txt')
```
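With the corpus and dictionary in place, training an LDA model in gensim is a short step further. The sketch below uses illustrative hyperparameters only (`num_topics=10`, `passes=5`); see the linked gensim tutorial for recommended settings and for evaluating topics with coherence measures.
```
from gensim.models import LdaModel

# Train LDA on the bag-of-words corpus built above.
# num_topics, passes and random_state are illustrative values only.
lda = LdaModel(
    corpus=corpus,
    id2word=dictionary,
    num_topics=10,
    passes=5,
    random_state=42,
)

# Inspect the top words of each learned topic.
for topic_id, topic in lda.print_topics(num_topics=10, num_words=8):
    print(topic_id, topic)
```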
# Welcome to the REDCap API Workshop
---
# Workshop Outline
---
By the end of this workshop we hope that you will learn what the REDCap API is, and how you can use it for your project.
During this workshop we will cover the following topics:
1. [What is an API?](#what-is-an-api)
2. [What is a RESTful API?](#what-is-a-restful-api)
- GET
- PUT
- POST
- DELETE
- HTTP Status Codes
3. [REDCap API Playground](#redcap-api-playground)
- Getting access to the API Playground
    - Using the API Playground
- JSON and CSV support
4. [Using Python with the API](#using-the-redcap-api-with-python)
- Options for connecting to the API
- Ex. Project Info
- Ex. Arms/Events
- Ex. Export Records
- Ex. Import Records
- Ex. Surveys
5. [Data Analysis and Visualization](#data-analysis-and-visualization)
- Ex. Using pandas package
- Ex. Using matplotlib for visualization
# <a name="what-is-an-api"></a>I. What is an API
---
An **Application Programming Interface** (**API**) is a set of functions used to communicate with an application or service.
Normally, when you are using a web application, you are interacting with it through a user interface.
An API allows you to bypass the front-end user interface and work with the backend application programmatically. This is useful when automating or building integrations between different apps. You can send requests to the application server to read, create, update, or delete the data.

For example, a researcher studying depression might want to analyze comments on Instagram to see if they can find speech patterns associated with depression. To do this they want to use a machine learning model which needs a huge dataset to train.
From a technical perspective, the first step would be to gather the data to train the model. You could do this by having an RA physically copy all the comments by hand, but this would be so time-consuming and tedious that it wouldn't be worth it. That's where the Instagram API can help. Instead of having your RA copy and paste thousands or millions of comments, they could write a script that scrolls through comments and copies them to an Excel sheet in an automated way.
To write this script, they would need to use the Instagram API to communicate with the Instagram backend application and pull the necessary data. The Instagram API would describe an **endpoint** (typically a URL associated with a resource) that the researcher could use in their script to get a list of comments.
###### Still a bit confused?
Let's look at a mock API example.
Go to the website [petstore.swagger.io](https://petstore.swagger.io/#/).
Here, they have an example API for a fake pet company that has a database of pets. See the table below.
|ID| Name| PhotoUrls|Tags|Status|..|
|---|---|---|---|---|---|
|1|Eagle|https://...|bird|available|..|
|2|Bunny|https://...|rodent|unavailable|..|
What if we wanted to find a list of animals available at the pet store?
Well, luckily, the company has created an API to allow us to retrieve this information without getting access to other protected information in the database. The company can control to what data we, as users, have access.
This mock API is open for public access with full rights, but most APIs will need authorization to perform certain actions.

Hopefully by now you have some idea of what an API is.
In the next section we'll look into some more API specifics.
```
import requests
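# GET the record for the pet with id 2 from the mock petstore API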
r = requests.get('https://petstore.swagger.io/v2/pet/2')
r.json()
```
---
<br>
<br>
<br>
# <a name="what-is-a-restful-api"></a>II. What is a RESTful API
---
Now that you're starting to form a sense of what an API *is*, we can start to look at the structure of APIs and the standards used by the web community.
There are many types of web APIs, each defining its own rules and best practices for interfacing with web services or applications. For example, APIs that adhere to the [**Simple Object Access Protocol**](https://en.wikipedia.org/wiki/SOAP) (SOAP) or [**Remote Procedure Call**](https://en.wikipedia.org/wiki/Remote_procedure_call) (RPC) architectures are common; however, we will focus on the most common API architecture, called [**REpresentational State Transfer**](https://en.wikipedia.org/wiki/Representational_state_transfer) (REST).
#### RESTful APIs
Web services that adhere to the REST architecture are deemed RESTful. While REST is an architectural style rather than an actual protocol like SOAP, it makes use of common internet standards like HTTP, URI and JSON. The nitty-gritty details of REST aren't required knowledge for anyone looking to use a RESTful API, but it's helpful to know a little bit about how the architecture connects users with services.
HTTP-based RESTful APIs (such as the REDCap API) are characterized by:
- A base URL
- Standard HTTP CRUD methods
From the example used in the first section, we could make a request through the API by combining the base URL https://petstore.swagger.io/v2/store/inventory with the standard HTTP method **GET** to retrieve the data about the pets in the store's inventory. Typically, the base URL is composed of the main URL + a version identifier + path/to/an/endpoint, as in the example.
#### So what are the HTTP standard CRUD methods?
**CRUD** stands for **Create, Read, Update, and Delete**. This defines the four actions that an agent can request from the web service. Each action has accompanying *methods* that can be called upon to perform these requests. They're summarized below.
##### Create
<div align='center'>
<img src="attachment:Post_method.png" height=226 width=601/>
<em>The CREATE method POSTs a new resource to the web service.</em>
</div>
The **POST** method allows agents (i.e., you) to *create* member resources. From the example above, we could have submitted a **POST** request to the web service to *create* a dog (member) in the pets (collection) database. The web service takes care of most of the work needed to actually create the member in the database, like assigning a unique ID; we simply have to know the correct API call to request it.
##### Read
<div align='center'>
<img src="attachment:Get_method2.png" height=226 width=601/>
<em>The READ method GETs a resource from the web service.</em>
</div>
The **GET** method allows the agent to pull data from the web service; in other words, to *read*. In the above example we *read* data from a collection of pets, but we could have also *read* data about the users or the state of the service, etc. In essence, a GET request means a resource payload flows from the service to the requesting agent.
##### Update
<div align='center'>
<img src="attachment:Put_method.png" height=226 width=601/>
<em>The UPDATE method comes in 2 flavours, PUT and PATCH. PUT updates the entire resource, PATCH can update only parts of the resource.</em>
</div>
The **PUT** and **PATCH** methods are both used to *update* a resource via a web service. The more common PUT method actually *replaces* the specified resource(s), or creates one if it doesn't already exist. The PATCH method differs subtly in that it *updates* the specified resource(s), and it *may* create one if it doesn't exist. If your resource is a file, for example, a PUT would replace the file with a new version, while a PATCH might only modify some part of it.
##### Delete
<div align='center'>
<img src="attachment:Delete_method.png" height=226 width=601/>
<em>The DELETE method removes a resource from the web application.</em>
</div>
You can use the **DELETE** method to delete resources from the web application. In practice, most databases will not actually delete the specified resource, but instead mark it as *deleted* so that it appears deleted to the user.
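To make these methods concrete, here is a minimal sketch (not part of the original workshop material) that exercises POST, PUT, and DELETE against the mock petstore API from section I, using the Python `requests` library we'll meet again later. The pet payload below is an illustrative assumption based on the petstore docs, not a required format.
```
import requests
base_url = 'https://petstore.swagger.io/v2'
# CREATE: POST a new pet to the collection (the id/name values are made up)
new_pet = {'id': 9999, 'name': 'Rex', 'photoUrls': [], 'status': 'available'}
r = requests.post(base_url + '/pet', json=new_pet)
print('POST:', r.status_code)
# UPDATE: PUT replaces the whole pet resource with the new representation
new_pet['status'] = 'sold'
r = requests.put(base_url + '/pet', json=new_pet)
print('PUT:', r.status_code)
# DELETE: remove the pet we just created
r = requests.delete(base_url + '/pet/9999')
print('DELETE:', r.status_code)
```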
Ok, we're almost done. We now know what a RESTful API is, and a little about the methods they use. The last thing we'll cover before we move to the REDCap API is HTTP responses.
##### HTTP Responses
Every time you send a request to a RESTful API, you will receive a response, whether or not your request was successful. For example, I'm sure we're all familiar with the infamous *404 Not Found* response, but how about the status code *504 Gateway Timeout*?
<div align='center'>
<img src="attachment:httpresponse.png" />
<em>An example of a response from a web service. Notice the three main parts: status code, header and body.</em>
</div>
HTTP responses have three main parts: a status code, a header, and a body. The header contains metadata usually describing what to expect from the body, like the content-type (ex. html) or the date. The body (also called content) contains the actual resource that was requested, like a web page or database entry. Finally, at the head of the response is the **status code**.
The status code quickly and concisely tells the requester whether the request was good or bad and what generally caused that status. Any status in the 200s is considered a success. Anything else is a failure. Each specific code tells you a bit about why the response is what it is. Take status code *404*, for example: it obviously indicates a failure, but if you know a bit about HTTP status codes it also tells you a bit about why the request failed. In this case, you know it's not a problem with the web service, or else the error would be in the 500s. Instead, it's a problem with the requester (client): specifically, the resource you want cannot be found.
Broadly, status codes in the 100s are informational, 200s indicate success, 300s are reserved for redirects, 400s are client errors (something wrong on the requester's side), and 500s are reserved for server-side errors. To help wrap your head around these codes, a human-readable summary of the status code categories is shown in the image below.
<div align='center'>
<img src="attachment:HTTP-codes.jpg" width=425 height=233/>
    <em>Summary of the HTTP status codes, in plain English</em>
</div>
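As a quick illustration (reusing the mock petstore example from section I rather than anything REDCap-specific), the `requests` response object exposes all three parts of the HTTP response:
```
import requests
r = requests.get('https://petstore.swagger.io/v2/pet/2')
print(r.status_code)                  # e.g. 200 on success, 404 if the pet doesn't exist
print(r.headers.get('Content-Type'))  # header metadata describing the body
print(r.text[:200])                   # the body (content) itself
# raise_for_status() raises an exception for any 4xx/5xx code,
# which is a handy guard before you try to parse the body.
r.raise_for_status()
```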
Finally, if this section seemed pretty neat to you, check out this [tutorial website](https://www.restapitutorial.com/) that can give some more details and best practices for creating a RESTful API and a list of explanations for each status code.
---
<br>
<br>
<br>
# <a name="redcap-api-playground"></a>III. REDCap API Playground
---
Like many other tools, REDCap provides an API that allows you to retrieve or modify data or settings within REDCap. An example use case would be performing automated data imports/exports.
The easiest way to get familiar with the capabilities of the REDCap API is through the API Playground - a feature that allows you to explore the REDCap API without writing any code.
To gain access to this feature, your user rights need to include API privileges. This can be enabled by the project owner.

Next, you will see the two API buttons in your project panel.

Finally, you can navigate to the API Playground and click "Request API token"; the token request is then approved by the REDCap Administrator.

---
For this workshop, we have a demo user with full API privileges. Let's all log into REDCap and take a look at the API Playground.
**URL:** https://edc.camhx.ca/redcap/
**Login:** redcap_api_demo
**Password:** KCNIWorkshop1
---
There are a few key elements on the API Playground page, let's go over them:
**API Method**
- This dropdown allows you to select the method/content that you want to work with.
- Examples include Arms, Events, Fields, Instruments, Records, Surveys, and more. Refer to full API documentation for details.
- Allows you to pass parameters to narrow down the information in the response.
REDCap provides all content on one endpoint https://edc.camhx.ca/redcap/api/. Therefore, selecting the content type and passing it in the request parameters is necessary.
**Raw Request Parameters**
- This section will be automatically generated when you select the appropriate API Method and parameters. It includes the same information in plain text format and can be used in any tool or programming language that allows making API requests.
- The majority of REDCap requests are `POST` requests. Even when we want to get data, we have to pass parameters to narrow down the results. We also have to include the token and the content type (method) in the request.
**Response & Execute Request**
- This section allows you to send a request with the parameters selected above and preview a response. It is a great feature to test your API calls before you put them into your application.
**Code Snippets**
- API Playground provides automatically generated code snippets in various languages.
- You can build your API calls without writing any code and copy the resulting snippets into your application.
REDCap API methods can provide and accept data in either JSON or CSV. XML is also supported, but used less commonly.
---
<br>
<br>
<br>
# <a name="using-the-redcap-api-with-python"></a>IV. Using REDCap API with Python
---
#### Choosing the language and library
Most popular programming languages have one or more libraries that allow you to make API calls. Some of them even have libraries to specifically work with REDCap. You can find more information here: http://redcap-tools.github.io/projects/
For this tutorial we will be using **Python 3.7** with the `requests` library. This library is not REDCap-specific and can work with any arbitrary API.
#### (Optional) Set up your environment
In this tutorial, the Python environment is already created for you with all packages installed. However, if you're working in another python environment then it will be necessary to install the `requests` library for that environment. Here we give an example using Anaconda.
From the terminal (Linux & MacOS) or Anaconda prompt (Windows):
```shell
# First create your conda environment - replace redcap-env with your preferred environment name
conda create --name redcap-env python=3.7
# Activate the environment you just created
conda activate redcap-env
# Install the requests package to your environment
conda install requests
```
### 1. Export Project Info
We are going to make our first API call with our Python code. This will return the project info.
Have a look at the code below:
```
import requests
redcap_url = 'https://edc.camhx.ca/redcap/api/'
data = {
'token': '19DC2E5DEC4794D7BEA5932BAB8D7B16',
'content': 'project',
'format': 'json',
'returnFormat': 'json'
}
r = requests.post(redcap_url, data=data)
```
First, we import the `requests` library.
Then, we specify the REDCap API endpoint (URL).
Next, we specify our parameters in a Python dictionary. This can be copied from the API Playground. Note the content parameter.
Finally, we make our `POST` request. It returns some JSON-formatted data, which is saved into variable `r`.
Now, we can do something with the data that we got back. For example, print everything that was returned as a JSON string, or convert it to a Python dictionary.
```
# Prints the raw result (JSON formatted string)
r.text
# Parses the raw result to convert it to a Python dictionary
r.json()
r.json().get('project_title')
```
### 2. Export Arms and Events
```
token = '19DC2E5DEC4794D7BEA5932BAB8D7B16'
data = {
'token': token,
'content': 'arm',
'format': 'json',
'returnFormat': 'json'
}
r = requests.post(redcap_url, data=data)
r.text
data = {
'token': token,
'content': 'event',
'format': 'json',
'arms[0]': '1', # Optional
'returnFormat': 'json'
}
r = requests.post(redcap_url, data=data)
r.json()
```
### 3. Export Records
```
# Get record with id 261965, instrument "my first instrument", as csv
data = {
'token': token,
'content': 'record',
'format': 'csv',
'type': 'flat',
'records[0]': '261965',
'forms[0]': 'my_first_instrument',
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false'
}
r = requests.post(redcap_url, data=data)
r.text
# If the form was completed in multiple events, it will return all instances.
# Same request, minimum parameters
data = {
'token': token,
'content': 'record',
'format': 'csv',
'records[0]': '261965',
'forms[0]': 'my_first_instrument'
}
r = requests.post(redcap_url, data=data)
r.text
# Same request, json
data = {
'token': token,
'content': 'record',
'format': 'json',
'records[0]': '261965',
'forms[0]': 'my_first_instrument',
}
r = requests.post(redcap_url, data=data)
r.json()
# Same request, json
data = {
'token': token,
'content': 'record',
'format': 'json',
'records[0]': '261965',
'forms[]': ['my_first_instrument', 'phq910'],
}
r = requests.post(redcap_url, data=data)
r.json()
```
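One common next step (not shown in the original workshop flow) is to pull the export straight into a pandas DataFrame. A minimal sketch, assuming `r` holds one of the export responses above:
```
import io
import pandas as pd
# If the export used 'format': 'csv' (first request above), parse the CSV text:
# records_df = pd.read_csv(io.StringIO(r.text))
# If the export used 'format': 'json' (last request above), build the frame directly:
records_df = pd.DataFrame(r.json())
records_df.head()
```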
### 4. Import Records
We will create a new record by sending some data as a JSON string. This will include the new record id, event name, and a variable to indicate that the first instrument should be complete.
Use your name or some unique value for the record id.
```
my_new_record = '[{"record_id": "Tester","redcap_event_name": "event_1_arm_1", "my_first_instrument_complete": "2"}]'
data = {
'token': token,
'content': 'record',
'format': 'json',
'type': 'flat',
'overwriteBehavior': 'overwrite',
'forceAutoNumber': 'false',
'data': my_new_record,
'returnContent': 'count'
}
r = requests.post(redcap_url, data=data)
r.text
```
Now let's try to send a full assessment. So far we have worked with JSON strings. This time we will use a Python dictionary to modify the values and then use the `json` library to convert it to a JSON string.
```
import json
# Don't forget to change the record id
my_phq9 = [{'record_id': 'Tester',
'redcap_event_name': 'event_1_arm_1',
'phq9_cde_id': 'PHQ9-1.0',
'phq9_1': '1',
'phq9_2': '1',
'phq9_3': '2',
'phq9_4': '1',
'phq9_5': '1',
'phq9_6': '1',
'phq9_7': '0',
'phq9_8': '3',
'phq9_9': '2',
'phq9_how_difficult': '2',
'phq9_calc': '12',
'phq910_complete': '2'}]
# Convert Python dict to JSON string, check if it looks correct
my_phq9 = json.dumps(my_phq9)
my_phq9
data = {
'token': token,
'content': 'record',
'format': 'json',
'type': 'flat',
'overwriteBehavior': 'overwrite',
'forceAutoNumber': 'false',
'data': my_phq9,
'returnContent': 'count'
}
r = requests.post(redcap_url, data=data)
r.text
```
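It's worth checking the response before assuming the import succeeded. With `'returnContent': 'count'`, a successful import returns the number of records imported, while errors typically come back with a non-200 status and a message in the body. A minimal guard (a generic `requests` pattern, not a REDCap-specific API):
```
if r.status_code == 200:
    print('Import succeeded, records imported:', r.text)
else:
    print('Import failed with status', r.status_code)
    print(r.text)  # REDCap usually returns an error message in the body
```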
---
<br>
<br>
<br>
# <a name="data-analysis-and-visualization"></a>V. Data Analysis and Visualization
---
```
import pandas as pd
data = {
'token': token,
'content': 'record',
'format': 'json'
}
r = requests.post(redcap_url, data=data)
r.json()
df = pd.DataFrame(r.json())
df
df.describe()
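# REDCap's JSON export returns values as strings,
# so convert the summary score to a numeric dtype before plotting.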
df[["phq9_calc"]] = df[["phq9_calc"]].apply(pd.to_numeric)
df.dtypes
df.plot(x ='record_id', y='phq9_calc', kind = 'bar')
```
_Lambda School Data Science_
# Reshape data
Objectives
- understand tidy data formatting
- melt and pivot data with pandas
Links
- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)
- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)
- Tidy Data
- Reshaping Data
- Python Data Science Handbook
- [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping
- [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables
Reference
- pandas documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)
- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
## Why reshape data?
#### Some libraries prefer data in different formats
For example, the Seaborn data visualization library prefers data in "Tidy" format often (but not always).
> "[Seaborn will be most powerful when your datasets have a particular organization.](https://seaborn.pydata.org/introduction.html#organizing-datasets) This format ia alternately called “long-form” or “tidy” data and is described in detail by Hadley Wickham. The rules can be simply stated:
> - Each variable is a column
- Each observation is a row
> A helpful mindset for determining whether your data are tidy is to think backwards from the plot you want to draw. From this perspective, a “variable” is something that will be assigned a role in the plot."
#### Data science is often about putting square pegs in round holes
Here's an inspiring [video clip from _Apollo 13_](https://www.youtube.com/watch?v=ry55--J4_VQ): “Invent a way to put a square peg in a round hole.” It's a good metaphor for data wrangling!
## Upgrade Seaborn
Run the cell below which upgrades Seaborn and automatically restarts your Google Colab Runtime.
```
!pip install seaborn --upgrade
import os
os.kill(os.getpid(), 9)
```
## Hadley Wickham's Examples
From his paper, [Tidy Data](http://vita.had.co.nz/papers/tidy-data.html)
```
%matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
table1 = pd.DataFrame(
[[np.nan, 2],
[16, 11],
[3, 1]],
index=['John Smith', 'Jane Doe', 'Mary Johnson'],
columns=['treatmenta', 'treatmentb'])
table2 = table1.T
```
"Table 1 provides some data about an imaginary experiment in a format commonly seen in the wild.
The table has two columns and three rows, and both rows and columns are labelled."
```
table1
```
"There are many ways to structure the same underlying data.
Table 2 shows the same data as Table 1, but the rows and columns have been transposed. The data is the same, but the layout is different."
```
table2
```
"Table 3 reorganises Table 1 to make the values, variables and obserations more clear.
Table 3 is the tidy version of Table 1. Each row represents an observation, the result of one treatment on one person, and each column is a variable."
| name | trt | result |
|--------------|-----|--------|
| John Smith | a | - |
| Jane Doe | a | 16 |
| Mary Johnson | a | 3 |
| John Smith | b | 2 |
| Jane Doe | b | 11 |
| Mary Johnson | b | 1 |
## Table 1 --> Tidy
We can use the pandas `melt` function to reshape Table 1 into Tidy format.
```
table1.index.name = 'name'
tidy1 = table1.reset_index().melt(id_vars='name', var_name='trt', value_name='result')
tidy1['trt'] = tidy1['trt'].str.replace('treatment','')
tidy1
```
## Table 2 --> Tidy
```
tidy2 = table2.T.reset_index().melt(id_vars='name', var_name='trt', value_name='result')
tidy2['trt'] = tidy2['trt'].str.replace('treatment','')
tidy2
```
## Tidy --> Table 1
The `pivot_table` function is the inverse of `melt`.
```
tidy1.pivot_table(index='name', columns='trt', values='result')
```
## Tidy --> Table 2
```
tidy2.pivot_table(index='name', columns='trt', values='result')
sns.catplot(x='trt', y='result', col='name', kind='bar', data=tidy1, height=2);
```
## Load Instacart data
Let's return to the dataset of [3 Million Instacart Orders](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2)
If necessary, run the cells below to re-download and extract the data
```
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
```
Run these cells to load the data
```
%cd instacart_2017_05_01
products = pd.read_csv('products.csv')
order_products = pd.concat([pd.read_csv('order_products__prior.csv'),
pd.read_csv('order_products__train.csv')])
orders = pd.read_csv('orders.csv')
```
## Goal: Reproduce part of this example
Instead of a plot with 50 products, we'll just do two — the first products from each list
- Half And Half Ultra Pasteurized
- Half Baked Frozen Yogurt
```
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*wKfV6OV-_1Ipwrl7AjjSuw.png'
example = Image(url=url, width=600)
display(example)
```
So, given a `product_name` we need to calculate its `order_hour_of_day` pattern.
## Subset and Merge
```
product_names = ['Half Baked Frozen Yogurt', 'Half And Half Ultra Pasteurized']
order_products_cols = ['order_id', 'product_id']
products_cols = ['product_id', 'product_name']
orders_cols = ['order_id', 'order_hour_of_day']
merged = orders[orders_cols].merge(order_products[order_products_cols]).merge(products[products_cols])
subset = merged[merged.product_name.isin(product_names)]
subset.head()
```
## 4 ways to reshape and plot
### 1. value_counts
```
froyo = subset[subset['product_name'] == product_names[0]]
creamer = subset[subset['product_name'] == product_names[1]]
creamer['order_hour_of_day'].value_counts(normalize=True).sort_index().plot()
froyo['order_hour_of_day'].value_counts(normalize=True).sort_index().plot();
```
### 2. crosstab
```
pd.crosstab(subset['order_hour_of_day'],
subset['product_name'],
normalize='columns').plot();
```
### 3. pivot_table
```
subset.pivot_table(index='order_hour_of_day',
columns='product_name',
values='order_id',
aggfunc=len).plot();
```
### 4. melt
```
table = pd.crosstab(subset['order_hour_of_day'],
subset['product_name'],
normalize=True)
melted = (table
.reset_index()
.melt(id_vars='order_hour_of_day')
.rename(columns={
'order_hour_of_day':'Hour of Day Ordered',
'product_name': 'Product',
'value': 'Percent of Orders by Product'
}))
sns.relplot(x='Hour of Day Ordered',
y='Percent of Orders by Product',
hue='Product',
data=melted,
kind='line')
```
# ASSIGNMENT
- Replicate the lesson code
- Complete the code cells we skipped near the beginning of the notebook
- Table 2 --> Tidy
- Tidy --> Table 2
- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.
```
flights = sns.load_dataset('flights')
flights_pivot = flights.pivot_table(index='year',
columns='month',
values='passengers')
flights_pivot
```
# STRETCH OPTIONS
_Try whatever sounds most interesting to you!_
- Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
- Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing "Number of Purchases" vs "Percent Reorder Purchases"
- Get the most recent order for each user in Instacart's dataset. This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)
- Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)
```
#merge all dfs:
merged = orders.merge(order_products).merge(products)
#drop some columns we dont need to make the df more memory-efficient:
merged.drop(columns=['eval_set', 'order_dow', 'days_since_prior_order', 'reordered', 'aisle_id', 'department_id'], inplace=True)
#Replicate more of Instacart's visualization showing "Hour of Day Ordered" vs "Percent of Orders by Product"
#subset of popular items:
product_order_counts = merged.product_name.value_counts()
popular = product_order_counts[product_order_counts >= 2900]
#branch merged df to one that only includes popular items:
merged_popular = merged[merged['product_name'].isin(popular.index)]
#group by product name and take the mean hour of day for each product and sort
grouped = (merged_popular
.groupby('product_name')
.order_hour_of_day.mean()
.sort_values())
earliest_25 = grouped.head(25)
latest_25 = grouped.tail(25)
#make subsets of the merged df with the 25 earliest and latest:
early_merged = merged_popular[merged_popular['product_name'].isin(earliest_25.index)]
late_merged = merged_popular[merged_popular['product_name'].isin(latest_25.index)]
#make crosstabs to plot:
early_ct = pd.crosstab(early_merged['order_hour_of_day'],
early_merged['product_name'],
normalize='columns');
late_ct = pd.crosstab(late_merged['order_hour_of_day'],
late_merged['product_name'],
normalize='columns');
#Plot the data reproducing the visual from instacart page:
fig = plt.figure(figsize=(18,11))
ax = plt.axes()
plt.plot(early_ct, color='g')
legend1 = plt.legend(labels=earliest_25.index, loc=2)
plt.plot(late_ct, color='r')
legend2 = plt.legend(labels=latest_25.index, loc=1)
ax.add_artist(legend1)
ax.add_artist(legend2)
ax.set_xticks([0,2,4,6,8,10,12,14,16,18,20,22,24])
plt.title('Popular products purchased earliest in the day (green) and latest in the day (red)', fontsize=16)
plt.ylabel('Percent of Orders by Product',fontsize=12)
plt.xlabel('Hour of Day Ordered',fontsize=12)
plt.show()
#Get the most recent order for each user in Instacart's dataset. This is a useful
#baseline when predicting a user's next order
merged = merged.sort_values(by=['user_id', 'order_number'])
#group by user_id and max order number:
user_grouped = merged.groupby('user_id')
#get the max order number for each user_id
user_grouped_max = merged.groupby('user_id')['order_number'].max()
user_grouped_max.head()
#add max_order_number column ot merged df for each user_id
max_df = pd.DataFrame(user_grouped_max)
max_df.rename(columns={'order_number': 'max_order_number'}, inplace=True)
max_df = max_df.reset_index()
max_merged = merged.merge(max_df)
#make dataframe of the most recent order only: where the order number = the max_order number for that user_id
most_recent = max_merged[(max_merged['order_number'] == max_merged['max_order_number'])]
most_recent.head()
#drop a few more columns we dont need:
most_recent.drop(columns=['order_id', 'order_number', 'order_hour_of_day', 'add_to_cart_order'], inplace=True)
#consolidate multiple rows from the same order number into one, resulting in dataframe w 2 columns:
#user id, and list of products from the most recent order:
#first grab the Series:
most_recent_orders = most_recent.groupby('user_id')['product_name'].unique()
most_recent_orders.head()
#Convert to DataFrame:
most_recent_orders = pd.DataFrame(most_recent_orders).rename(columns={'product_name': 'List of items in most recent order'})
#Display the first 30 users:
pd.options.display.max_colwidth = 100
most_recent_orders.head(30)
```
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset.
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
print(school_data_complete.describe())
print(school_data_complete.size)
school_data_complete
```
## District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Calculate the percentage of students who passed math **and** reading (% Overall Passing)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
```
total_schools = len(school_data_complete['school_name'].unique())
total_students = len(school_data_complete['Student ID'].unique())
total_budget = sum(school_data_complete['budget'].unique())
avg_math_score = school_data_complete['math_score'].mean()
avg_reading_score = school_data_complete['reading_score'].mean()
print (total_schools,total_students, total_budget, avg_math_score, avg_reading_score)
school_data_math_over_70_df = school_data_complete.loc[school_data_complete['math_score'] >= 70]
students_with_math_score_over_70 = school_data_math_over_70_df['Student ID'].count()
students_with_math_score_over_70_percentage = (students_with_math_score_over_70/total_students) * 100
school_data_reading_over_70_df = school_data_complete.loc[school_data_complete['reading_score'] >= 70]
students_with_reading_score_over_70 = school_data_reading_over_70_df['Student ID'].count()
students_with_reading_score_over_70_percentage = (students_with_reading_score_over_70/total_students) * 100
print (students_with_math_score_over_70_percentage,students_with_reading_score_over_70_percentage)
school_data_mathreading_over_70_df = school_data_complete[(school_data_complete['math_score'] >= 70) & (school_data_complete['reading_score'] >= 70)]
students_with_mathreading_score_over_70 = school_data_mathreading_over_70_df['Student ID'].count()
students_with_mathreading_score_over_70_percentage = (students_with_mathreading_score_over_70/total_students) * 100
print (students_with_mathreading_score_over_70_percentage)
district_summary_df = pd.DataFrame({"Total Number of Schools": total_schools,
"Total Number of Students": total_students,
"Total Budget": total_budget,
"Average Math Score": avg_math_score,
"Average Reading Score": avg_reading_score,
"% of Students with Passing Math Score": [students_with_math_score_over_70_percentage],
"% of Students with Passing Reading Score": students_with_reading_score_over_70_percentage,
"% of Students with Overall Passing Score": students_with_mathreading_score_over_70_percentage})
district_summary_df
district_summary_df.style.format({'Total Budget':'${:,.0f}',
'Average Math Score':'{:,.2f}',
'Average Reading Score':'{:,.2f}',
'% of Students with Passing Math Score':'{:,.2f}',
'% of Students with Passing Reading Score':'{:,.2f}',
'% of Students with Overall Passing Score':'{:,.2f}'})
```
## School Summary
* Create an overview table that summarizes key metrics about each school, including:
* School Name
* School Type
* Total Students
* Total School Budget
* Per Student Budget
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* % Overall Passing (The percentage of students that passed math **and** reading.)
* Create a dataframe to hold the above results
```
#grouped_school_data_df = school_data_complete.groupby(['school_name', 'type','size'])
grouped_school_data_df = school_data_complete.groupby(['school_name'])
grouped_school_data_df.count().head()
total_students_by_school = school_data_complete['school_name'].value_counts()
total_students_by_school.head()
total_budget_by_school = grouped_school_data_df['budget'].unique()
total_budget_by_school
avg_math_by_school = grouped_school_data_df['math_score'].mean()
avg_math_by_school.head()
avg_reading_by_school = grouped_school_data_df['reading_score'].mean()
avg_reading_by_school.head()
math_passing_by_school = school_data_math_over_70_df.groupby(['school_name'])
students_math_passing_by_school = math_passing_by_school['school_name'].count()
students_math_passing_by_school.head()
mathreading_passing_by_school = school_data_mathreading_over_70_df.groupby(['school_name'])
students_mathreading_passing_by_school = mathreading_passing_by_school['school_name'].count()
students_mathreading_passing_by_school.head()
reading_passing_by_school = school_data_reading_over_70_df.groupby(['school_name'])
students_reading_passing_by_school = reading_passing_by_school['school_name'].count()
students_reading_passing_by_school.head()
school_summary_df = pd.DataFrame({"Total Students": total_students_by_school,
"School Type": grouped_school_data_df['type'].unique(),
"School Size": grouped_school_data_df['size'].unique(),
"Total Budget": total_budget_by_school,
"Average Math Score": avg_math_by_school,
"Average Reading Score": avg_reading_by_school,
"% Passing Math": (students_math_passing_by_school/total_students_by_school)*100,
"% Passing Reading": (students_reading_passing_by_school/total_students_by_school)*100,
"% Overall Passing": (students_mathreading_passing_by_school/total_students_by_school)*100})
school_summary_df['School Type'] = [','.join(map(str, l)) for l in school_summary_df['School Type']]
school_summary_df.reset_index(inplace=True)
school_summary_df.rename(columns = {"index" : "school_name"}, inplace=True)
#school_summary_df.dtypes
school_summary_df
school_summary_df.style.format({'Total Budget':'${:,.0f}',
'Average Math Score':'{:,.2f}',
'Average Reading Score':'{:,.2f}',
'% Passing Math':'{:,.2f}',
'% Passing Reading':'{:,.2f}',
'% Overall Passing':'{:,.2f}'})
#school_summary_df.dtypes
school_summary_df
```
## Top Performing Schools (By % Overall Passing)
* Sort and display the top five performing schools by % overall passing.
```
top_school_summary_df = school_summary_df.sort_values(['% Overall Passing'], ascending=False)
top_school_summary_df.head()
```
## Bottom Performing Schools (By % Overall Passing)
* Sort and display the five worst-performing schools by % overall passing.
```
bottom_school_summary_df = school_summary_df.sort_values(['% Overall Passing'], ascending=True)
bottom_school_summary_df.head()
```
## Math Scores by Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
```
#Method # 1 - Grades in columns grouped by School Names
school_data_complete.loc[school_data_complete['grade'] == '9th', '9th'] = school_data_complete['math_score']
school_data_9th_grade_df = school_data_complete.groupby(["school_name"])
school_data_9th_grade = school_data_9th_grade_df['9th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '10th', '10th'] = school_data_complete['math_score']
school_data_10th_grade_df = school_data_complete.groupby(["school_name"])
school_data_10th_grade = school_data_10th_grade_df['10th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '11th', '11th'] = school_data_complete['math_score']
school_data_11th_grade_df = school_data_complete.groupby(["school_name"])
school_data_11th_grade = school_data_11th_grade_df['11th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '12th', '12th'] = school_data_complete['math_score']
school_data_12th_grade_df = school_data_complete.groupby(["school_name"])
school_data_12th_grade = school_data_12th_grade_df['12th'].mean()
school_summary_mathgrade_df = pd.DataFrame({"9th": school_data_9th_grade,
"10th": school_data_10th_grade,
"11th": school_data_11th_grade,
"12th": school_data_12th_grade})
school_summary_mathgrade_df
#Method # 2 - Grade in rows grouped by School Names
school_data_by_grade_df = school_data_complete.groupby(["school_name", "grade"])
summary_school_data_by_math_grade = school_data_by_grade_df["math_score"].mean()
summary_school_data_by_math_grade.head()
school_summary_by_math_grade_df = pd.DataFrame({"Avg Math Score": summary_school_data_by_math_grade })
school_summary_by_math_grade_df.head()
school_summary_by_math_grade_df.style.format({'Avg Math Score':'{:,.2f}'})
```
## Reading Score by Grade
* Perform the same operations as above for reading scores
```
#Method # 1 - Grades in columns grouped by School Names
school_data_complete.loc[school_data_complete['grade'] == '9th', '9th'] = school_data_complete['reading_score']
school_data_9th_grade_df = school_data_complete.groupby(["school_name"])
school_data_9th_grade = school_data_9th_grade_df['9th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '10th', '10th'] = school_data_complete['reading_score']
school_data_10th_grade_df = school_data_complete.groupby(["school_name"])
school_data_10th_grade = school_data_10th_grade_df['10th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '11th', '11th'] = school_data_complete['reading_score']
school_data_11th_grade_df = school_data_complete.groupby(["school_name"])
school_data_11th_grade = school_data_11th_grade_df['11th'].mean()
school_data_complete.loc[school_data_complete['grade'] == '12th', '12th'] = school_data_complete['reading_score']
school_data_12th_grade_df = school_data_complete.groupby(["school_name"])
school_data_12th_grade = school_data_12th_grade_df['12th'].mean()
school_summary_readinggrade_df = pd.DataFrame({"9th": school_data_9th_grade,
                                               "10th": school_data_10th_grade,
                                               "11th": school_data_11th_grade,
                                               "12th": school_data_12th_grade})
school_summary_readinggrade_df
#Method # 2 - Grade in rows grouped by School Names
school_data_by_grade_df = school_data_complete.groupby(["school_name", "grade"])
summary_school_data_by_reading_grade = school_data_by_grade_df["reading_score"].mean()
summary_school_data_by_reading_grade.head()
school_summary_by_reading_grade_df = pd.DataFrame({"Avg Reading Score": summary_school_data_by_reading_grade })
school_summary_by_reading_grade_df.head()
school_summary_by_reading_grade_df = school_summary_by_reading_grade_df.rename_axis(['School Name', 'Grade'])
school_summary_by_reading_grade_df.style.format({'Avg Reading Score':'{:,.2f}'})
```
## Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* Overall Passing Rate (Average of the above two)
```
#print(school_summary_df)
school_summary_df['Budget Per Student']= school_summary_df['Total Budget']/school_summary_df['Total Students']
school_summary_df.sort_values(by='Budget Per Student', ascending=True)
# Create the bins in which Data will be held
bins = [0, 584 , 629, 644, 675]
# Create the names for the bins
group_names = ["<$584", "$585-629", "$630-644", "$645-675"]
school_summary_df["Budget Range"] = pd.cut(school_summary_df["Budget Per Student"], bins, labels=group_names, include_lowest=True)
school_summary_df.sort_values(by='Budget Range', ascending=True)
school_summary_budget_range_df = pd.DataFrame(school_summary_df.groupby(['Budget Range']).mean())
school_summary_budget_range_df.drop(columns=['Total Students'], inplace=True)
school_summary_budget_range_df.style.format({'Average Math Score':'{:,.2f}',
'Average Reading Score':'{:,.2f}',
'% Passing Math':'{:,.2f}',
'% Passing Reading':'{:,.2f}',
'% Overall Passing':'{:,.2f}'})
```
## Scores by School Size
* Perform the same operations as above, based on school size.
```
# Create the bins in which Data will be held
bins_size = [0, 999 , 1999, 5000]
# Create the names for the bins
group_names_size = ["Small(<1000)", "Medium(1000-2000)", "Large(2000-5000)"]
school_summary_df["School Size"] = pd.cut(school_summary_df["School Size"], bins_size, labels=group_names_size, include_lowest=True)
school_summary_df.head(10)
school_summary_size_range_df = pd.DataFrame(school_summary_df.groupby(['School Size']).mean())
school_summary_size_range_df.drop(columns=['Total Students'], inplace=True)
school_summary_size_range_df.style.format({'Average Math Score':'{:,.2f}',
'Average Reading Score':'{:,.2f}',
'% Passing Math':'{:,.2f}',
'% Passing Reading':'{:,.2f}',
'% Overall Passing':'{:,.2f}'})
#changing column name for merging later
#school_summary_df.reset_index().rename(columns = {'index':'school_name'},inplace=True)
#school_summary_df['school_name'] = school_summary_df.index
#school_summary_df.head()
#school_summary_by_size_df = pd.DataFrame({"Total Students": school_data_by_size})
#school_summary_by_size_df.reset_index(inplace=True)
#print(school_summary_by_size_df.dtypes)
#school_summary_by_size_df
#merge_df = pd.merge(school_summary_by_size_df, school_summary_df, on=["school_name","school_name"])
#merge_df = pd.merge(school_summary_by_size_df, school_data_complete, on=["school_name","school_name"])
#merge_df.drop(merge_df.filter(regex='_y$').columns.tolist(),axis=1, inplace=True)
#merge_df
#grouped_merge_df = merge_df.groupby('school_name')
#grouped_merge_avg_math = grouped_merge_df['math_score'].mean()
#grouped_merge_avg_reading = grouped_merge_df['reading_score'].mean()
```
## Scores by School Type
* Perform the same operations as above, based on school type
```
school_summary_type_df = pd.DataFrame(school_summary_df.groupby(['School Type']).mean())
school_summary_type_df.drop(columns=['Total Students'], inplace=True)
school_summary_type_df
```
---
```
# Import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
from sklearn.model_selection import train_test_split
from lightgbm import LGBMClassifier, plot_importance, plot_tree
from sklearn.metrics import classification_report
import lightgbm as lgbm
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
import warnings
import platform
from matplotlib import font_manager, rc
## Font setup per operating system
path = "c:/Windows/Fonts/malgun.ttf"
if platform.system() == 'Darwin':
font_name = 'AppleGothic'
rc('font', family='AppleGothic')
elif platform.system() == 'Windows':
font_name = font_manager.FontProperties(fname=path).get_name()
rc('font', family=font_name)
else:
font_name = font_manager.FontProperties(fname="/usr/share/fonts/nanumfont/NanumGothic.ttf")
rc('font', family="NanumGothic")
warnings.simplefilter(action='ignore')
df_cat = pd.read_csv("total_data(교통,인구,집값,어린이,벨포함).csv")
df_cat = df_cat[(df_cat["year"] != "2020(상반기)") & (df_cat["year"] != "2020(하반기)")]
X = df_cat[df_cat.columns[2:-5]]
y = df_cat[df_cat.columns[-5:]]
y1 = df_cat[["q1"]]
y2 = df_cat[["q2"]]
y3 = df_cat[["q3"]]
y4 = df_cat[["q4"]]
y5 = df_cat[["q5"]]
standard = StandardScaler()
standard.fit(X)
X_stand = standard.transform(X)
X_scale = pd.DataFrame(X_stand,columns = X.columns)
# Split into train and test sets
X_train = X_scale.loc[:164]
X1_train = X_train.drop(columns=["before_q2", "before_q3", "before_q4", "before_q5"])
X2_train = X_train.drop(columns=["before_q1", "before_q3", "before_q4", "before_q5"])
X3_train = X_train.drop(columns=["before_q1", "before_q2", "before_q4", "before_q5"])
X4_train = X_train.drop(columns=["before_q1", "before_q2", "before_q3", "before_q5"])
X5_train = X_train.drop(columns=["before_q1", "before_q2", "before_q3", "before_q4"])
X_test = X_scale.loc[164:]  # note: .loc slicing is inclusive, so row 164 also falls into X_train above
X1_test = X_test.drop(columns=["before_q2", "before_q3", "before_q4", "before_q5"])
X2_test = X_test.drop(columns=["before_q1", "before_q3", "before_q4", "before_q5"])
X3_test = X_test.drop(columns=["before_q1", "before_q2", "before_q4", "before_q5"])
X4_test = X_test.drop(columns=["before_q1", "before_q2", "before_q3", "before_q5"])
X5_test = X_test.drop(columns=["before_q1", "before_q2", "before_q3", "before_q4"])
y1_train = y1.loc[:164]
y1_test = y1.loc[164:]
y2_train = y2.loc[:164]
y2_test = y2.loc[164:]
y3_train = y3.loc[:164]
y3_test = y3.loc[164:]
y4_train = y4.loc[:164]
y4_test = y4.loc[164:]
y5_train = y5.loc[:164]
y5_test = y5.loc[164:]
# stray leftover hyperparameter notes (commented out so the cell runs): max_depth=7, n_estimators=150
```
# Q1
```
model = lgbm.LGBMRegressor()
para = {"max_depth" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "n_estimators" : [30, 50, 100], "learning_rate" : [0.1, 0.01], "num_leaves" : range(30)}
grid = GridSearchCV(model, param_grid = para, cv = 3)
grid.fit(X1_train, y1_train.astype("int"))
best = grid.best_estimator_
best
pred = best.predict(X1_test)
mean_absolute_error(pred, y1_test)
cat_q1_full_fi = pd.DataFrame(best.feature_importances_,index=X1_train.columns)
cat_q1_full_fi.sort_values(by=0,ascending=False)[:30]
```
# Q2
```
model = lgbm.LGBMRegressor()
para = {"max_depth" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "n_estimators" : [30, 50, 100], "learning_rate" : [0.1, 0.01], "num_leaves" : range(30)}
grid = GridSearchCV(model, param_grid = para, cv = 3)
grid.fit(X2_train, y2_train.astype("int"))
best = grid.best_estimator_
best
pred = best.predict(X2_test)
mean_absolute_error(pred, y2_test)
cat_q1_full_fi = pd.DataFrame(best.feature_importances_,index=X2_train.columns)
cat_q1_full_fi.sort_values(by=0,ascending=False)[:30]
```
# Q3
```
model = lgbm.LGBMRegressor()
para = {"max_depth" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "n_estimators" : [30, 50, 100], "learning_rate" : [0.1, 0.01], "num_leaves" : range(30)}
grid = GridSearchCV(model, param_grid = para, cv = 3)
grid.fit(X3_train, y3_train.astype("int"))
best = grid.best_estimator_
best
pred = best.predict(X3_test)
mean_absolute_error(pred, y3_test)
cat_q1_full_fi = pd.DataFrame(best.feature_importances_,index=X3_train.columns)
cat_q1_full_fi.sort_values(by=0,ascending=False)[:30]
```
# Q4
```
model = lgbm.LGBMRegressor()
para = {"max_depth" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "n_estimators" : [30, 50, 100], "learning_rate" : [0.1, 0.01], "num_leaves" : range(30)}
grid = GridSearchCV(model, param_grid = para, cv = 3)
grid.fit(X4_train, y4_train.astype("int"))
best = grid.best_estimator_
best
pred = best.predict(X4_test)
mean_absolute_error(pred, y4_test)
cat_q1_full_fi = pd.DataFrame(best.feature_importances_,index=X4_train.columns)
cat_q1_full_fi.sort_values(by=0,ascending=False)[:30]
```
# Q5
```
model = lgbm.LGBMRegressor()
para = {"max_depth" : [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], "n_estimators" : [30, 50, 100], "learning_rate" : [0.1, 0.01], "num_leaves" : range(30)}
grid = GridSearchCV(model, param_grid = para, cv = 3)
grid.fit(X5_train, y5_train.astype("int"))
best = grid.best_estimator_
best
pred = best.predict(X5_test)
mean_absolute_error(pred, y5_test)
cat_q1_full_fi = pd.DataFrame(best.feature_importances_,index=X5_train.columns)
cat_q1_full_fi.sort_values(by=0,ascending=False)[:30]
```
---
```
import os
import re
import numpy as np
import pandas as pd
import ujson as json
patient_ids = []
for filename in os.listdir('./raw'):
# PhysioNet patient files are named with six-digit record IDs
match = re.search(r'\d{6}', filename)
if match:
id_ = match.group()
patient_ids.append(id_)
out = pd.read_csv('./raw/Outcomes-a.txt').set_index('RecordID')['In-hospital_death']
# select the 35 attributes that contain enough non-missing values
attributes = ['DiasABP', 'HR', 'Na', 'Lactate', 'NIDiasABP', 'PaO2', 'WBC', 'pH', 'Albumin', 'ALT', 'Glucose', 'SaO2',
'Temp', 'AST', 'Bilirubin', 'HCO3', 'BUN', 'RespRate', 'Mg', 'HCT', 'SysABP', 'FiO2', 'K', 'GCS',
'Cholesterol', 'NISysABP', 'TroponinT', 'MAP', 'TroponinI', 'PaCO2', 'Platelets', 'Urine', 'NIMAP',
'Creatinine', 'ALP']
# mean and std of 35 attributes
mean = np.array([59.540976152469405, 86.72320413227443, 139.06972964987443, 2.8797765291788986, 58.13833409690321,
147.4835678885565, 12.670222585415166, 7.490957887101613, 2.922874149659863, 394.8899400819931,
141.4867570064675, 96.66380228136883, 37.07362841054398, 505.5576196473552, 2.906465787821709,
23.118951553526724, 27.413004968675743, 19.64795551193981, 2.0277491155660416, 30.692432164676188,
119.60137167841977, 0.5404785381886381, 4.135790642787733, 11.407767149315339, 156.51746031746032,
119.15012244292181, 1.2004983498349853, 80.20321011673151, 7.127188940092161, 40.39875518672199,
191.05877024038804, 116.1171573535279, 77.08923183026529, 1.5052390166989214, 116.77122488658458])
std = np.array(
[13.01436781437145, 17.789923096504985, 5.185595006246348, 2.5287518090506755, 15.06074282896952, 85.96290370390257,
7.649058756791069, 8.384743923130074, 0.6515057685658769, 1201.033856726966, 67.62249645388543, 3.294112002091972,
1.5604879744921516, 1515.362517984297, 5.902070316876287, 4.707600932877377, 23.403743427107095, 5.50914416318306,
0.4220051299992514, 5.002058959758486, 23.730556355204214, 0.18634432509312762, 0.706337033602292,
3.967579823394297, 45.99491531484596, 21.97610723063014, 2.716532297586456, 16.232515568438338, 9.754483687298688,
9.062327978713556, 106.50939503021543, 170.65318497610315, 14.856134327604906, 1.6369529387005546,
133.96778334724377])
fs = open('./json/json', 'w')
def to_time_bin(x):
h, m = map(int, x.split(':'))
return h
def parse_data(x):
x = x.set_index('Parameter').to_dict()['Value']
values = []
for attr in attributes:
if attr in x:
values.append(x[attr])
else:
values.append(np.nan)
return values
def parse_delta(masks, dir_):
if dir_ == 'backward':
masks = masks[::-1]
deltas = []
for h in range(48):
if h == 0:
deltas.append(np.ones(35))
else:
deltas.append(np.ones(35) + (1 - masks[h]) * deltas[-1])
return np.array(deltas)
def parse_rec(values, masks, evals, eval_masks, dir_):
deltas = parse_delta(masks, dir_)
# only used in GRU-D
forwards = pd.DataFrame(values).ffill().fillna(0.0).to_numpy()  # .as_matrix() was removed in newer pandas
rec = {}
rec['values'] = np.nan_to_num(values).tolist()
rec['masks'] = masks.astype('int32').tolist()
# imputation ground-truth
rec['evals'] = np.nan_to_num(evals).tolist()
rec['eval_masks'] = eval_masks.astype('int32').tolist()
rec['forwards'] = forwards.tolist()
rec['deltas'] = deltas.tolist()
return rec
def parse_id(id_):
data = pd.read_csv('./raw/{}.txt'.format(id_))
# accumulate the records within one hour
data['Time'] = data['Time'].apply(lambda x: to_time_bin(x))
evals = []
# merge all the metrics within one hour
for h in range(48):
evals.append(parse_data(data[data['Time'] == h]))
evals = (np.array(evals) - mean) / std
shp = evals.shape
evals = evals.reshape(-1)
# randomly hide ~10% of the observed values as the imputation ground-truth
# (np.random.choice samples with replacement by default, so slightly fewer unique positions are hidden)
indices = np.where(~np.isnan(evals))[0].tolist()
indices = np.random.choice(indices, len(indices) // 10)
values = evals.copy()
values[indices] = np.nan
masks = ~np.isnan(values)
eval_masks = (~np.isnan(values)) ^ (~np.isnan(evals))
evals = evals.reshape(shp)
values = values.reshape(shp)
masks = masks.reshape(shp)
eval_masks = eval_masks.reshape(shp)
label = out.loc[int(id_)]
rec = {'label': label}
# prepare the model for both directions
rec['forward'] = parse_rec(values, masks, evals, eval_masks, dir_='forward')
rec['backward'] = parse_rec(values[::-1], masks[::-1], evals[::-1], eval_masks[::-1], dir_='backward')
rec = json.dumps(rec)
fs.write(rec + '\n')
test_id=(132539)
data = pd.read_csv('./raw/{}.txt'.format(test_id))
data['Time'] = data['Time'].apply(lambda x: to_time_bin(x))
data['Time']
evals = []
# merge all the metrics within one hour
for h in range(48):
evals.append(parse_data(data[data['Time'] == h]))
evals = (np.array(evals) - mean) / std
shp = evals.shape
evals = evals.reshape(-1)
# randomly eliminate 10% values as the imputation ground-truth
indices = np.where(~np.isnan(evals))[0].tolist()
indices = np.random.choice(indices, len(indices) // 10)
values = evals.copy()
values[indices] = np.nan
masks = ~np.isnan(values)
eval_masks = (~np.isnan(values)) ^ (~np.isnan(evals))
evals = evals.reshape(shp)
values = values.reshape(shp)
masks = masks.reshape(shp)
eval_masks = eval_masks.reshape(shp)
label = out.loc[int(test_id)]
rec = {}
rec['label']=int(label)
rec['forward'] = parse_rec(values, masks, evals, eval_masks, dir_='forward')
rec['backward'] = parse_rec(values[::-1], masks[::-1], evals[::-1], eval_masks[::-1], dir_='backward')
rec = json.dumps(rec)
for id_ in patient_ids:
print('Processing patient {}'.format(id_))
try:
parse_id(id_)
except Exception as e:
print(e)
continue
fs.close()
```
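As a quick sanity check of what `parse_delta` produces (a sketch, not part of the original script, and assuming the cell above has been run): hiding a single feature for one hour makes its delta grow until the next observation and then reset.

```
# Toy check of parse_delta: feature 0 is observed at every hour except hour 1
toy_masks = np.ones((48, 35))   # shape matches the hard-coded 48 hours x 35 attributes above
toy_masks[1, 0] = 0
toy_deltas = parse_delta(toy_masks, dir_='forward')
print(toy_deltas[0, 0], toy_deltas[1, 0], toy_deltas[2, 0])  # 1.0 2.0 1.0
```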
---
Lambda School Data Science
*Unit 2, Sprint 2, Module 2*
---
# Random Forests
## Assignment
- [x] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.
- [x] Continue to participate in our Kaggle challenge.
- [x] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features.
- [x] Try Ordinal Encoding.
- [x] Try a Random Forest Classifier.
- [x] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions; a sample API command is sketched just after this list.)
- [x] Commit your notebook to your fork of the GitHub repo.
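If you use the API route, a submission from a notebook cell usually looks like the sketch below. The competition slug and file name are placeholders rather than the actual values for this challenge, and the `kaggle` package plus an API token in `~/.kaggle/kaggle.json` are assumed to be configured.

```
# Hypothetical example -- replace the slug and CSV name with your own
!kaggle competitions submit -c your-competition-slug -f submission.csv -m "random forest baseline"
```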
## Stretch Goals
### Doing
- [ ] Add your own stretch goal(s) !
- [x] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.
- [x] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).
- [x] Get and plot your feature importances.
- [x] Make visualizations and share on Slack.
### Reading
Top recommendations in _**bold italic:**_
#### Decision Trees
- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_
- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)
- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)
- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)
- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU)
#### Random Forests
- [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods
- [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)
- _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_
#### Categorical encoding for trees
- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)
- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)
- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_
- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_
- [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)
- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)
#### Imposter Syndrome
- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)
- [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)
- ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html)
- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_
### More Categorical Encodings
**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:
- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.
- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).
- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).
- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).
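As a quick illustration of the three scikit-learn-compatible options above, here is a minimal sketch on a toy column (the dataframe and category values are made up for the example):

```python
import pandas as pd
import category_encoders as ce

toy = pd.DataFrame({'funder': ['Gov', 'Unicef', 'Gov', 'WorldBank']})

ordinal = ce.OrdinalEncoder(cols=['funder']).fit_transform(toy)                    # one integer column
onehot = ce.OneHotEncoder(cols=['funder'], use_cat_names=True).fit_transform(toy)  # one 0/1 column per category
binary = ce.BinaryEncoder(cols=['funder']).fit_transform(toy)                      # ~log2(cardinality) 0/1 columns

print(ordinal.shape, onehot.shape, binary.shape)
```

Trees can split usefully on any of these; the trade-off is mostly width (one-hot explodes on high-cardinality columns, binary stays compact, ordinal stays a single column but imposes an arbitrary order).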
**2.** The short video
**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.
Category Encoders has multiple implementations of this general concept:
- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)
- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)
- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)
- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)
- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)
- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)
Category Encoder's mean encoding implementations work for regression problems or binary classification problems.
For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:
```python
encoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting
X_train_encoded = encoder.fit_transform(X_train, y_train=='functional')
X_val_encoded = encoder.transform(X_val)
```
For this reason, mean encoding won't work well within pipelines for multi-class classification problems.
**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.
```python
dirty_cat.TargetEncoder(clf_type='multiclass-clf')
```
It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).
However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.
**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals.
_**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categoricals. It’s an active area of research and experimentation! Maybe you can make your own contributions!**_
### Setup
You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).
```
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
```
## Imports
```
!pip install category_encoders
# Import
%matplotlib inline
import numpy as np
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.pipeline import make_pipeline
import category_encoders as ce
```
## Separate Train into Train and Validation
```
# Train/Validate/Test split the data
train, val = train_test_split(
train, train_size=0.80,
test_size=0.20,
stratify=train['status_group'],
random_state=42
)
train.shape, val.shape, test.shape
```
## Data Exploration
```
train.groupby('quality_group')['status_group'].value_counts(normalize=True)
#seems that good and functional correlate
train.groupby('quantity')['status_group'].value_counts(normalize=True)
#Category enough also seems to be an indicator of functionality
```
## Feature Engineering/Wrangle
```
from math import radians, cos, sin, asin, sqrt
def single_pt_haversine(lat, lng, degrees=True):
"""
'Single-point' Haversine: Calculates the great circle distance
between a point on Earth and the (0, 0) lat-long coordinate
"""
r = 6371 # Earth's radius (km). Have r = 3956 if you want miles
# Convert decimal degrees to radians
if degrees:
lat, lng = map(radians, [lat, lng])
# 'Single-point' Haversine formula
a = sin(lat/2)**2 + cos(lat) * sin(lng/2)**2
d = 2 * r * asin(sqrt(a))
return d
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these values like zero.
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# When columns have zeros and shouldn't, they are like null values.
# So we will replace the zeros with nulls, and impute missing values later.
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'population', 'gps_height']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
X[col+'_MISSING'] = X[col].isnull()
# Drop duplicate columns
duplicates = ['quantity_group', 'payment_type']
X = X.drop(columns=duplicates)
    # Drop recorded_by (constant for every row); id and num_private are left in for now
unused = ['recorded_by']
X = X.drop(columns=unused)
# Changing region and district code into categories rather than ints
X['region_code'] = X['region_code'].astype('str')
X['district_code'] = X['district_code'].astype('str')
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
#binning construction_years
cut_bins = [0, 1, 1960, 1970, 1980, 1990, 2000, 2010, 2013]
X['construction_bin'] = pd.cut(x=X['construction_year'], bins= cut_bins, include_lowest=True, duplicates='raise').values.add_categories('missing')
X['construction_bin']=X['construction_bin'].fillna('missing')
# Engineer feature: how many years from construction_year to date_recorded
X['years_till_check'] = X['year_recorded'] - X['construction_year']
# Engineer feature: Age of waterpoint
X['age'] = 2013 - X['construction_year']
# Engineer feature: Water quality and quantity
X['good_enough'] = ((X['quality_group'] == 'good') & (X['quantity'] == 'enough')).astype(int)
# Engineer feature: Using lat and long features
# Trying two ways to use the lat and long - haversine and
# https://datascience.stackexchange.com/questions/13567/ways-to-deal-with-longitude-latitude-feature
# see which one works better for model
X['x'] = np.cos(X['latitude']) * np.cos(X['longitude'])
X['y'] = np.cos(X['latitude']) * np.sin(X['longitude'])
X['z'] = np.sin(X['latitude'])
# Engineer feature: Haversine point
X['harvesine_distance'] = [single_pt_haversine(latitude, longitude) for latitude, longitude in zip(X['latitude'], X['longitude'])]
    # Return the wrangled dataframe
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
train.head()
```
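For reference, the "single-point" haversine used inside `wrangle` reduces to the formula below, with $\varphi$ and $\lambda$ the latitude and longitude in radians and $r \approx 6371$ km:

$$a = \sin^2\!\left(\tfrac{\varphi}{2}\right) + \cos(\varphi)\,\sin^2\!\left(\tfrac{\lambda}{2}\right), \qquad d = 2r\,\arcsin\!\left(\sqrt{a}\right)$$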
## Models
### Features
```
# Select features
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.drop('id').tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all of the categorical features (no cardinality cutoff is applied here)
categorical_features = cardinality.index.tolist()
# Combine the lists
features = numeric_features + categorical_features
print(features)
print(len(features))
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
```
### Ordinal Encoding and Random Forest Classifier
```
# Random forest model
rf1 = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators = 100,
random_state=42,
n_jobs=-1)
)
#Fit on train, score on val
rf1.fit(X_train, y_train)
# Val accuracy
print('Train Accuracy', rf1.score(X_train, y_train))
print('Validation Accuracy', rf1.score(X_val, y_val))
rf1.named_steps
# Plot the feature importances
model = rf1.named_steps['randomforestclassifier']
encoder = rf1.named_steps['ordinalencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='pink');
```
### Binary Encoder and Random Forest Classifier
```
# Random forest model
rf2 = make_pipeline(
ce.BinaryEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators = 100,
random_state=42,
n_jobs=-1)
)
#Fit on train, score on val
rf2.fit(X_train, y_train)
# Val accuracy
print('Train Accuracy', rf2.score(X_train, y_train))
print('Validation Accuracy', rf2.score(X_val, y_val))
#cannot really make much changes to pipeline because it keeps crashing
#could not try OneHotEncoder, IterativeImputer or modify RandomForest
rf2.named_steps
# Plot the feature importances
model = rf2.named_steps['randomforestclassifier']
encoder = rf2.named_steps['binaryencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='green');
```
### Binary Encoder and XGBoost
```
%%time
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
xg = make_pipeline(
ce.BinaryEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg.fit(X_train, y_train)
# Score on val
print('Train Accuracy:', xg.score(X_train, y_train))
print('Validation Accuracy:', xg.score(X_val, y_val))
# Plot the feature importances
model = xg.named_steps['xgbclassifier']
encoder = xg.named_steps['binaryencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='grey');
```
### Ordinal Encoding and XGBoost
```
%%time
xg1 = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(max_depth=20, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg1.fit(X_train, y_train)
# Score on val
print('Train Accuracy:', xg1.score(X_train, y_train))
print('Validation Accuracy:', xg1.score(X_val, y_val))
# Plot the feature importances
model = xg1.named_steps['xgbclassifier']
encoder = xg1.named_steps['ordinalencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color= 'blue');
```
## All Features
```
target = 'status_group'
X_train1 = train.drop(columns = target)
y_train1 = train[target]
X_val1 = val.drop(columns = target)
y_val1 = val[target]
X_test1 = test
```
### Ordinal Encoding and Random Forest Classifier
```
rf3 = make_pipeline(
ce.ordinal.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=100,
n_jobs=-1,
random_state=42)
)
rf3.fit(X_train1, y_train1)
print(f'Train accuracy:{rf3.score(X_train1, y_train1)}')
print(f'Validation accuracy:{rf3.score(X_val1, y_val1)}')
```
### Binary Encoding and Random Forest Classifier
```
rf4 = make_pipeline(
ce.BinaryEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=100,
n_jobs=-1,
random_state=42)
)
rf4.fit(X_train1, y_train1)
print(f'Train accuracy:{rf4.score(X_train1, y_train1)}')
print(f'Validation accuracy:{rf4.score(X_val1, y_val1)}')
```
### Ordinal Encoding and XGBoost
```
%%time
xg3 = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg3.fit(X_train1, y_train1)
# Score on val
print('Train Accuracy:', xg3.score(X_train1, y_train1))
print('Validation Accuracy:', xg3.score(X_val1, y_val1))
```
### Binary Encoding and XGBoost
```
%%time
xg4 = make_pipeline(
ce.BinaryEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg4.fit(X_train1, y_train1)
# Score on val
print('Train Accuracy:', xg4.score(X_train1, y_train1))
print('Validation Accuracy:', xg4.score(X_val1, y_val1))
assert all(X_train.columns == X_test.columns)
# Predict on test
y_pred = xg.predict(X_test)
# #Write a submission csv file
# submission = sample_submission.copy()
# submission['status_group'] = y_pred
# submission.to_csv('Obukwelu_DSPT5_Unit_2_submission2.csv', index=False)
# submission.head()
# submission.shape
```
|
github_jupyter
|
encoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) # Both parameters > 1 to avoid overfitting
X_train_encoded = encoder.fit_transform(X_train, y_train=='functional')
X_val_encoded = encoder.transform(X_val)
dirty_cat.TargetEncoder(clf_type='multiclass-clf')
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
!pip install category_encoders
# Import
%matplotlib inline
import numpy as np
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer, SimpleImputer
from sklearn.pipeline import make_pipeline
import category_encoders as ce
# Train/Validate/Test split the data
train, val = train_test_split(
train, train_size=0.80,
test_size=0.20,
stratify=train['status_group'],
random_state=42
)
train.shape, val.shape, test.shape
train.groupby('quality_group')['status_group'].value_counts(normalize=True)
#seems that good and functional correlate
train.groupby('quantity')['status_group'].value_counts(normalize=True)
#Category enough also seems to be an indicator of functionality
from math import radians, cos, sin, asin, sqrt
def single_pt_haversine(lat, lng, degrees=True):
"""
'Single-point' Haversine: Calculates the great circle distance
between a point on Earth and the (0, 0) lat-long coordinate
"""
r = 6371 # Earth's radius (km). Have r = 3956 if you want miles
# Convert decimal degrees to radians
if degrees:
lat, lng = map(radians, [lat, lng])
# 'Single-point' Haversine formula
a = sin(lat/2)**2 + cos(lat) * sin(lng/2)**2
d = 2 * r * asin(sqrt(a))
return d
def wrangle(X):
"""Wrangle train, validate, and test sets in the same way"""
# Prevent SettingWithCopyWarning
X = X.copy()
# About 3% of the time, latitude has small values near zero,
# outside Tanzania, so we'll treat these values like zero.
X['latitude'] = X['latitude'].replace(-2e-08, 0)
# When columns have zeros and shouldn't, they are like null values.
# So we will replace the zeros with nulls, and impute missing values later.
cols_with_zeros = ['longitude', 'latitude', 'construction_year',
'population', 'gps_height']
for col in cols_with_zeros:
X[col] = X[col].replace(0, np.nan)
X[col+'_MISSING'] = X[col].isnull()
# Drop duplicate columns
duplicates = ['quantity_group', 'payment_type']
X = X.drop(columns=duplicates)
    # Drop recorded_by (constant for every row); id and num_private are left in for now
unused = ['recorded_by']
X = X.drop(columns=unused)
# Changing region and district code into categories rather than ints
X['region_code'] = X['region_code'].astype('str')
X['district_code'] = X['district_code'].astype('str')
# Convert date_recorded to datetime
X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True)
# Extract components from date_recorded, then drop the original column
X['year_recorded'] = X['date_recorded'].dt.year
X['month_recorded'] = X['date_recorded'].dt.month
X['day_recorded'] = X['date_recorded'].dt.day
X = X.drop(columns='date_recorded')
#binning construction_years
cut_bins = [0, 1, 1960, 1970, 1980, 1990, 2000, 2010, 2013]
X['construction_bin'] = pd.cut(x=X['construction_year'], bins= cut_bins, include_lowest=True, duplicates='raise').values.add_categories('missing')
X['construction_bin']=X['construction_bin'].fillna('missing')
# Engineer feature: how many years from construction_year to date_recorded
X['years_till_check'] = X['year_recorded'] - X['construction_year']
# Engineer feature: Age of waterpoint
X['age'] = 2013 - X['construction_year']
# Engineer feature: Water quality and quantity
X['good_enough'] = ((X['quality_group'] == 'good') & (X['quantity'] == 'enough')).astype(int)
# Engineer feature: Using lat and long features
# Trying two ways to use the lat and long - haversine and
# https://datascience.stackexchange.com/questions/13567/ways-to-deal-with-longitude-latitude-feature
# see which one works better for model
X['x'] = np.cos(X['latitude']) * np.cos(X['longitude'])
X['y'] = np.cos(X['latitude']) * np.sin(X['longitude'])
X['z'] = np.sin(X['latitude'])
# Engineer feature: Haversine point
X['harvesine_distance'] = [single_pt_haversine(latitude, longitude) for latitude, longitude in zip(X['latitude'], X['longitude'])]
# Return the wrangled dataframe
return X
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
train.head()
# Select features
# The status_group column is the target
target = 'status_group'
# Get a dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# Get a list of the numeric features
numeric_features = train_features.select_dtypes(include='number').columns.drop('id').tolist()
# Get a series with the cardinality of the nonnumeric features
cardinality = train_features.select_dtypes(exclude='number').nunique()
# Get a list of all the non-numeric (categorical) features; no cardinality cut-off is applied here
categorical_features = cardinality.index.tolist()
# Combine the lists
features = numeric_features + categorical_features
print(features)
print(len(features))
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# Random forest model
rf1 = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators = 100,
random_state=42,
n_jobs=-1)
)
#Fit on train, score on val
rf1.fit(X_train, y_train)
# Val accuracy
print('Train Accuracy', rf1.score(X_train, y_train))
print('Validation Accuracy', rf1.score(X_val, y_val))
rf1.named_steps
# Plot the feature importances
model = rf1.named_steps['randomforestclassifier']
encoder = rf1.named_steps['ordinalencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='pink');
# Random forest model
rf2 = make_pipeline(
ce.BinaryEncoder(),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators = 100,
random_state=42,
n_jobs=-1)
)
#Fit on train, score on val
rf2.fit(X_train, y_train)
# Val accuracy
print('Train Accuracy', rf2.score(X_train, y_train))
print('Validation Accuracy', rf2.score(X_val, y_val))
# Cannot make many changes to the pipeline because it keeps crashing
# Could not try OneHotEncoder, IterativeImputer, or tuning the RandomForest
rf2.named_steps
# Plot the feature importances
model = rf2.named_steps['randomforestclassifier']
encoder = rf2.named_steps['binaryencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='green');
%%time
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
xg = make_pipeline(
ce.BinaryEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg.fit(X_train, y_train)
# Score on val
print('Train Accuracy:', xg.score(X_train, y_train))
print('Validation Accuracy:', xg.score(X_val, y_val))
# Plot the feature importances
model = xg.named_steps['xgbclassifier']
encoder = xg.named_steps['binaryencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color='grey');
%%time
xg1 = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(max_depth=20, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg1.fit(X_train, y_train)
# Score on val
print('Train Accuracy:', xg1.score(X_train, y_train))
print('Validation Accuracy:', xg1.score(X_val, y_val))
# Plot the feature importances
model = xg1.named_steps['xgbclassifier']
encoder = xg1.named_steps['ordinalencoder']
encoded_columns = encoder.fit_transform(X_train).columns
importances = pd.Series(model.feature_importances_, encoded_columns)
plt.figure(figsize=(20,30))
importances.sort_values().plot.barh(color= 'blue');
target = 'status_group'
X_train1 = train.drop(columns = target)
y_train1 = train[target]
X_val1 = val.drop(columns = target)
y_val1 = val[target]
X_test1 = test
rf3 = make_pipeline(
ce.ordinal.OrdinalEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=100,
n_jobs=-1,
random_state=42)
)
rf3.fit(X_train1, y_train1)
print(f'Train accuracy:{rf3.score(X_train1, y_train1)}')
print(f'Validation accuracy:{rf3.score(X_val1, y_val1)}')
rf4 = make_pipeline(
ce.BinaryEncoder(),
SimpleImputer(),
RandomForestClassifier(n_estimators=100,
n_jobs=-1,
random_state=42)
)
rf4.fit(X_train1, y_train1)
print(f'Train accuracy:{rf4.score(X_train1, y_train1)}')
print(f'Validation accuracy:{rf4.score(X_val1, y_val1)}')
%%time
xg3 = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg3.fit(X_train1, y_train1)
# Score on val
print('Train Accuracy:', xg3.score(X_train1, y_train1))
print('Validation Accuracy:', xg3.score(X_val1, y_val1))
%%time
xg4 = make_pipeline(
ce.BinaryEncoder(),
XGBClassifier(max_depth=10, eta=0.3, n_estimators=100,
n_jobs=-1, min_child_weight=1, subsample=0.9,
colsample_bytree=0.9, objective='multi:softmax',
eval_metric='merror', gamma=0, scale_pos_weight=1,
seed=42, verbose_eval=True)
)
# Fit on train
xg4.fit(X_train1, y_train1)
# Score on val
print('Train Accuracy:', xg4.score(X_train1, y_train1))
print('Validation Accuracy:', xg4.score(X_val1, y_val1))
assert all(X_train.columns == X_test.columns)
# Predict on test
y_pred = xg.predict(X_test)
# #Write a submission csv file
# submission = sample_submission.copy()
# submission['status_group'] = y_pred
# submission.to_csv('Obukwelu_DSPT5_Unit_2_submission2.csv', index=False)
# submission.head()
# submission.shape
| 0.592431 | 0.978447 |
<img src="./intro_images/MIE.PNG" width="100%" align="left" />
<table style="float:right;">
<tr>
<td>
<div style="text-align: right"><a href="https://alandavies.netlify.com" target="_blank">Dr Alan Davies</a></div>
<div style="text-align: right">Senior Lecturer Health Data Science</div>
<div style="text-align: right">University of Manchester</div>
</td>
<td>
<img src="./intro_images/alan.PNG" width="30%" />
</td>
</tr>
</table>
# 10.0 Object oriented programming
****
#### About this Notebook
This notebook introduces the Object Oriented Programming (OOP) paradigm in Python. This allows us to encapsulate variables, functions and other data structures in a single overarching reusable data structure that can model real world objects.
<div class="alert alert-block alert-warning"><b>Learning Objectives:</b>
<br/> At the end of this notebook you will be able to:
- Investigate key features of OOP in Python
- Practice creating classes and objects in Python
</div>
Python also supports the <code>Object Oriented Programming</code> (OOP) paradigm (as well as the imperative, functional and procedural styles). This is essentially a way of storing multiple functions and variables that are in some way semantically related together in a single overarching data structure.
Consider building a system that could model health interactions. We could create <code>objects</code> to represent the key elements of this system such as doctors, nurses and patients. To do this we can design a <code>class</code> for each of these that <code>encapsulates</code> various functions (called methods) and variables (attributes). Let's start by building a basic class for a doctor.
```
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
```
So here is a basic class containing one method called <code>__init__()</code> (2 underscores, the word init and 2 more underscores) that takes some parameters for the type of doctor and their name and stores these in variables inside the class. A class is like a blueprint where an object is like a specific instance. This would be like having class <code>phone</code> and then an instance of this called <code>iPhone</code> or a class called <code>car</code> with an instance called <code>Mini</code>.
<div class="alert alert-success">
<b>Note:</b> Class names tend to start with capital letters to distinguish them from other variables and functions.
</div>
The <code>__init__()</code> function is what is known as the class <code>constructor</code>. You can think of this as a default initialisation function that gets run when you create an instance of the object. The constructor doesn't need any parameters other than the <code>self</code> keyword. In the other methods the <code>self</code> keyword is used to tell Python that we are referring to the variable in the instance of the object (its own copy of the variable).
Here we can create 2 instances of the <code>Doctor</code> class and customise their parameters.
```
my_doctor = Doctor("Sandra Clark", "Cardiac consultant")
another_doctor = Doctor("Mike Smith", "Respiratory F1")
```
Here we have made 2 instances of our doctor class (like 2 copies that we can then customize). Another way to think about classes and instances is that the class is like the blueprint you give to a machine in the factory to manufacture some item. It specifies the item. When the machine starts to produce items (instances) the items can then be customized. Imagine a machine that makes a certain type of car. Once manufactured, each car can be customized. Maybe you spray them a different colour for example.
<br /><br />
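To make the analogy concrete, here is a throwaway sketch (not part of the health system example) of a blueprint and two customised instances:
```
class Car:
    def __init__(self, colour):
        self.colour = colour  # each instance stores its own copy of the attribute

mini_one = Car("red")
mini_two = Car("British racing green")
print(mini_one.colour, "/", mini_two.colour)  # two customised instances of one blueprint
```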
Let's add some methods to the class:
```
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient):
print(self.name, "will diagnose patient", patient)
self.process_patient()
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
patient_1 = "Alan"
patient_2 = "Jane"
sandra = Doctor("Sandra Clark", "Cardiac consultant")
mike = Doctor("Mike Smith", "Respiratory F1")
sandra.admit_patient(patient_1)
sandra.diagnose_patient(patient_1)
sandra.discharge_patient(patient_1)
mike.admit_patient(patient_2)
mike.discharge_patient(patient_2)
print("Sandra processed patients", sandra.number_of_times_patients_processed(), "times")
print("Mike processed patients", mike.number_of_times_patients_processed(), "times")
```
To access methods in the class we type the object instance name (i.e. <code>sandra</code>) followed by a dot (period) <code>.</code> and then the method we want to call i.e. <code>admit_patient()</code>. We also have to use the <code>self</code> keyword before variables and functions contained within a class to tell Python that they belong to this particular instance of the class.
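For example, a minimal sketch (using the <code>sandra</code> instance created above) showing that attributes are reached with the same dot notation as method calls:
```
# Read attributes directly from the instance
print(sandra.name)  # Sandra Clark
print(sandra.role)  # Cardiac consultant

# Call a method on the instance; Python passes self automatically
sandra.admit_patient("Alan")
```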
<div class="alert alert-block alert-info">
<b>Task 1:</b>
<br>
1. Write a method in the class called <code>current_role</code> that outputs the doctor's role.<br />
2. Create a new doctor instance called <code>mary</code> and call the new method.<br />
3. Make Mary a <code>nephrologist</code> (kidney doctor)
</div>
```
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient):
print(self.name, "will diagnose patient", patient)
self.process_patient()
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
def current_role(self):
print("My role is:", self.role)
mary = Doctor("Mary", "Nephrologist")
mary.current_role()
```
Another way we can represent classes and design them/show interactions between them is by making a class diagram:
<img src="./intro_images/doctor.PNG" width="500" />
The diagram shows the class name at the top followed by the attributes (variables) and what data type they represent. The next section shows the class methods and their inputs.
So far we have been representing our patients as simple strings. Let's make a patient class so that it can interact with our doctor class. We can give the patients a name, age, hospital number, presenting problem, diagnosis and past medical history.
```
class Patient:
def __init__(self, name, hospital_number, presenting_complaint):
self.name = name
self.hospital_number = hospital_number
self.presenting_complaint = presenting_complaint
self.PMH = []
self.diagnosis = None
def add_medical_history(self, medical_history_item):
self.PMH.append(medical_history_item)
def get_medical_history(self):
return self.PMH
def show_diagnosis(self):
return self.diagnosis
def update_diagnosis(self, diagnosis):
self.diagnosis = diagnosis
def whats_wrong(self):
return self.presenting_complaint
```
Now let's make some patients and give them some past and current medical problems.
```
john = Patient("John Miles", 123456, "Abdominal pain")
john.add_medical_history("Gout")
john.add_medical_history("IHD")
john.add_medical_history("MS")
jane = Patient("Jane Smith", 344532, "Chest pain")
jane.add_medical_history("Hypertension")
jane.add_medical_history("Type II diabetes")
print(john.get_medical_history())
print(jane.get_medical_history())
```
Now let's update our doctor class to work better with our patient class.
```
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient, presenting_complaint):
diagnosis = ""
print(self.name, "will diagnose patient", patient)
self.process_patient()
if presenting_complaint == "Abdominal pain":
diagnosis = "Gall stones"
elif presenting_complaint == "Chest pain":
diagnosis = "Myocardial infarction (heart attack)"
else:
diagnosis = "Unknown - need to run more tests"
return diagnosis
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
```
<div class="alert alert-block alert-info">
<b>Task 2:</b>
<br>
1. Write a method in the class called <code>discharge_patient</code> that takes <code>patient</code> as a parameter<br />
2. Print the doctor's name and state that they will discharge the patient.<br />
3. Call the <code>process_patient()</code> function
</div>
```
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient, presenting_complaint):
diagnosis = ""
print(self.name, "will diagnose patient", patient)
self.process_patient()
if presenting_complaint == "Abdominal pain":
diagnosis = "Gall stones"
elif presenting_complaint == "Chest pain":
diagnosis = "Myocardial infarction (heart attack)"
else:
diagnosis = "Unknown - need to run more tests"
return diagnosis
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
```
Now let's use the class.
```
print("John's diagnosis =", john.show_diagnosis())
mike = Doctor("Mike Smith", "Respiratory F1")
mike.admit_patient(john.name)
john.update_diagnosis(mike.diagnose_patient(john.name, john.whats_wrong()))
print("John's diagnosis =", john.show_diagnosis())
```
Hopefully you can start to see how we could continue to build this up into a more complex and interconnected system that we could use to start modeling things and processes in real life. There are 4 main principles of OOP and these include:
<ul>
<li><strong>Encapsulation:</strong> Storing the data and methods of an object such that they are invisible and inaccessible to unauthorized parties</li>
<li><strong>Abstraction:</strong> An abstract representation of a thing. The inner workings are hidden and are not essential to know in order to interact with the object</li>
<li><strong>Inheritance:</strong> Reusing and extending existing code to make something more specific. i.e. a <code>surgeon</code> may be based on a super class of <code>doctor</code> inheriting its methods and attributes and extending them with surgeon specific features</li>
<li><strong>Polymorphism:</strong> Used to process data differently depending on the input and redefine methods for a derived class </li>
</ul>
We have already been using <code>encapsulation</code> and <code>abstraction</code> in the previous examples. But let's look at using <code>inheritance</code> with an example of making a surgeon from our doctor class.
```
class Surgeon(Doctor):
def do_brain_surgery(self, patient):
print(self.name, "will do a frontal lobectomy on patient", patient)
self.process_patient()
barry = Surgeon("Barry Anderton", "Brain surgeon")
barry.admit_patient(john.name)
barry.do_brain_surgery(john.name)
```
As you can see, the new <code>Surgeon</code> class has all the functionality of our <code>Doctor</code> class, with the addition of a method that allows them to carry out a particular surgical procedure. In this way we could continue to build up a series of doctors, such as radiologists, GPs and so on, all of which have the basic doctor functions plus role-specific behaviour of their own.
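Polymorphism, the last of the four principles listed above, can also be seen with this hierarchy: a subclass can redefine (override) an inherited method so that the same call behaves differently depending on the object's class. A minimal sketch (the <code>Psychiatrist</code> class and its behaviour are illustrative additions, not part of the original notebook):
```
class Psychiatrist(Doctor):
    def diagnose_patient(self, patient, presenting_complaint):
        # Override the inherited method: same call, specialised behaviour
        print(self.name, "will assess the mental state of patient", patient)
        self.process_patient()
        return "Needs a full psychiatric assessment"

# The same method call resolves to the most specific implementation for each object
for doc in [Doctor("Mike Smith", "Respiratory F1"), Psychiatrist("Priya Patel", "Psychiatrist")]:
    print(doc.diagnose_patient("Alan", "Low mood"))
```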
<div class="alert alert-block alert-info">
<b>Task 3:</b>
<br>
1. Create a <code>Radiologist</code> class that extends the <code>Doctor</code> class. Give them 2 methods:<br />
2. <code>do_xray</code> and <code>do_MRI</code>.<br />
3. Create 2 instances of the Radiologist class with each calling one of the 2 methods on <code>Jane</code> and <code>John</code>.
</div>
```
class Radiologist(Doctor):
def do_xray(self, patient):
print(self.name, "Will do an x-ray on patient", patient)
self.process_patient()
def do_MRI(self, patient):
print(self.name, "Will do an MRI scan on patient", patient)
self.process_patient()
norman = Radiologist("Norman Sanders", "Radiologist")
norman.admit_patient(jane.name)
norman.do_xray(jane.name)
sarah = Radiologist("Sarah Mullroy", "Radiologist")
sarah.admit_patient(john.name)
sarah.do_MRI(john.name)
```
<div class="alert alert-block alert-info">
<b>Task 4:</b>
<br>
1. Create a new class for another healthcare professional of your choice (i.e. Nurse, Paramedic, Physio, ...)<br />
2. Think about what methods they might have and implement them<br />
3. Test out your new class by making it interact with our existing <code>Doctor</code> and <code>Patient</code> classes.
</div>
The next notebook looks at going beyond notebooks and introduces <code>Integrated Development Environments</code> (IDEs). We also take a look at using Python for analysis.
### Notebook details
<br>
<i>Notebook created by <strong>Dr. Alan Davies</strong>.
<br>
© Alan Davies 2021
## Notes:
|
github_jupyter
|
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
my_doctor = Doctor("Sandra Clark", "Cardiac consultant")
another_doctor = Doctor("Mike Smith", "Respiratory F1")
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient):
print(self.name, "will diagnose patient", patient)
self.process_patient()
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
patient_1 = "Alan"
patient_2 = "Jane"
sandra = Doctor("Sandra Clark", "Cardiac consultant")
mike = Doctor("Mike Smith", "Respiratory F1")
sandra.admit_patient(patient_1)
sandra.diagnose_patient(patient_1)
sandra.discharge_patient(patient_1)
mike.admit_patient(patient_2)
mike.discharge_patient(patient_2)
print("Sandra processed patients", sandra.number_of_times_patients_processed(), "times")
print("Mike processed patients", mike.number_of_times_patients_processed(), "times")
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient):
print(self.name, "will diagnose patient", patient)
self.process_patient()
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
def current_role(self):
print("My role is:", self.role)
mary = Doctor("Mary", "Nephrologist")
mary.current_role()
class Patient:
def __init__(self, name, hospital_number, presenting_complaint):
self.name = name
self.hospital_number = hospital_number
self.presenting_complaint = presenting_complaint
self.PMH = []
self.diagnosis = None
def add_medical_history(self, medical_history_item):
self.PMH.append(medical_history_item)
def get_medical_history(self):
return self.PMH
def show_diagnosis(self):
return self.diagnosis
def update_diagnosis(self, diagnosis):
self.diagnosis = diagnosis
def whats_wrong(self):
return self.presenting_complaint
john = Patient("John Miles", 123456, "Abdominal pain")
john.add_medical_history("Gout")
john.add_medical_history("IHD")
john.add_medical_history("MS")
jane = Patient("Jane Smith", 344532, "Chest pain")
jane.add_medical_history("Hypertension")
jane.add_medical_history("Type II diabetes")
print(john.get_medical_history())
print(jane.get_medical_history())
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient, presenting_complaint):
diagnosis = ""
print(self.name, "will diagnose patient", patient)
self.process_patient()
if presenting_complaint == "Abdominal pain":
diagnosis = "Gall stones"
elif presenting_complaint == "Chest pain":
diagnosis = "Myocardial infarction (heart attack)"
else:
diagnosis = "Unknown - need to run more tests"
return diagnosis
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
class Doctor:
def __init__(self, name, role):
self.name = name
self.role = role
self.patients_processed = 0
def admit_patient(self, patient):
print(self.name, "will admit patient", patient)
self.process_patient()
def diagnose_patient(self, patient, presenting_complaint):
diagnosis = ""
print(self.name, "will diagnose patient", patient)
self.process_patient()
if presenting_complaint == "Abdominal pain":
diagnosis = "Gall stones"
elif presenting_complaint == "Chest pain":
diagnosis = "Myocardial infarction (heart attack)"
else:
diagnosis = "Unknown - need to run more tests"
return diagnosis
def discharge_patient(self, patient):
print(self.name, "will discharge patient", patient)
self.process_patient()
def process_patient(self):
self.patients_processed += 1
def number_of_times_patients_processed(self):
return self.patients_processed
print("John's diagnosis =", john.show_diagnosis())
mike = Doctor("Mike Smith", "Respiratory F1")
mike.admit_patient(john.name)
john.update_diagnosis(mike.diagnose_patient(john.name, john.whats_wrong()))
print("John's diagnosis =", john.show_diagnosis())
class Surgeon(Doctor):
def do_brain_surgery(self, patient):
print(self.name, "will do a frontal lobectomy on patient", patient)
self.process_patient()
barry = Surgeon("Barry Anderton", "Brain surgeon")
barry.admit_patient(john.name)
barry.do_brain_surgery(john.name)
class Radiologist(Doctor):
def do_xray(self, patient):
print(self.name, "Will do an x-ray on patient", patient)
self.process_patient()
def do_MRI(self, patient):
print(self.name, "Will do an MRI scan on patient", patient)
self.process_patient()
norman = Radiologist("Norman Sanders", "Radiologist")
norman.admit_patient(jane.name)
norman.do_xray(jane.name)
sarah = Radiologist("Sarah Mullroy", "Radiologist")
sarah.admit_patient(john.name)
sarah.do_MRI(john.name)
| 0.436622 | 0.981997 |
```
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
```
### Create placeholders
```
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
```
### Create generator
```
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('generator', reuse=reuse):
h1 = tf.layers.dense(z, n_units, activation=None)
h1 = tf.maximum(alpha * h1, h1) # Leaky ReLU
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
```
### Create discriminator
```
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator', reuse=reuse):
h1 = tf.layers.dense(x, n_units, activation=None)
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
```
### Initialize hyperparameters
```
input_size = 784
z_size = 100
g_hidden_size = 128
d_hidden_size = 128
alpha = 0.01
smooth = 0.1
```
### Define the GAN model graph
```
tf.reset_default_graph()
input_real, input_z = model_inputs(input_size, z_size)
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
```
### Define the loss functions
```
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
```
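For reference, with $D(\cdot)$ the discriminator output, $G(z)$ a generated sample and $s = 0.1$ the label-smoothing factor applied to the real labels, the cross-entropy terms above correspond (up to the batch average) to:

$$\mathcal{L}_D = -\,\mathbb{E}_{x}\big[(1-s)\log D(x) + s\log\big(1 - D(x)\big)\big] \;-\; \mathbb{E}_{z}\big[\log\big(1 - D(G(z))\big)\big]$$

$$\mathcal{L}_G = -\,\mathbb{E}_{z}\big[\log D(G(z))\big]$$

Smoothing the real labels to $1 - s$ is a common trick to stop the discriminator from becoming over-confident.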
### Define the learning rate and optimizers
```
learning_rate = 0.002
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_train_optimize = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
d_train_optimize = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
```
### Check the batch data
```
batch_size = 100
batch = mnist.train.next_batch(batch_size)
batch
batch[0][0]
batch[1][0]
```
### Implement training
```
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for i in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images * 2 - 1
# Generator
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
_ = sess.run(d_train_optimize, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_optimize, feed_dict={input_z: batch_z})
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print('Epoch {}/{}'.format(e + 1, epochs),
'D loss: {:.4f}'.format(train_loss_d),
'G loss: {:.4f}'.format(train_loss_g))
losses.append((train_loss_d, train_loss_g))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
with open('training_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
```
### Plot the training losses
```
fig, ax = plt.subplots()
losses = np.array(losses)
ax.plot(losses.T[0], label='D')
ax.plot(losses.T[1], label='G')
plt.title('Train Loss')
plt.legend()
```
### Convert the samples to images and view them
```
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7, 7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
return fig, axes
with open('training_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7, 12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
```
### Generate new images from the checkpoint file
```
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
```
### Try the program written above
```
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
```
|
github_jupyter
|
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('generator', reuse=reuse):
h1 = tf.layers.dense(z, n_units, activation=None)
h1 = tf.maximum(alpha * h1, h1) # Leaky ReLU
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator', reuse=reuse):
h1 = tf.layers.dense(x, n_units, activation=None)
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
input_size = 784
z_size = 100
g_hidden_size = 128
d_hidden_size = 128
alpha = 0.01
smooth = 0.1
tf.reset_default_graph()
input_real, input_z = model_inputs(input_size, z_size)
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
learning_rate = 0.002
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_train_optimize = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
d_train_optimize = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
batch_size = 100
batch = mnist.train.next_batch(batch_size)
batch
batch[0][0]
batch[1][0]
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for i in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images * 2 - 1
# Generator
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
_ = sess.run(d_train_optimize, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_optimize, feed_dict={input_z: batch_z})
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print('Epoch {}/{}'.format(e + 1, epochs),
'D loss: {:.4f}'.format(train_loss_d),
'G loss: {:.4f}'.format(train_loss_g))
losses.append((train_loss_d, train_loss_g))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
with open('training_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
fig, ax = plt.subplots()
losses = np.array(losses)
ax.plot(losses.T[0], label='D')
ax.plot(losses.T[1], label='G')
plt.title('Train Loss')
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7, 7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
return fig, axes
with open('training_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7, 12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
| 0.761893 | 0.809389 |
```
import pandas as pd
import numpy as np
import pickle
repository_info = pd.read_csv("../../data/processed/final_repo_english_whatwhy.csv")
```
# Export rankings for user study
## Top 500 pairs
```
with open("../../rankings/ranking_nmf_45_mult.pickle", "rb") as f:
ranking_nmf_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_multiplicative[1][0:500]],
"score" : ranking_nmf_multiplicative[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_multiplicative_top500.csv")
with open("../../rankings/ranking_lda_45_mult.pickle", "rb") as f:
ranking_lda_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_multiplicative[1][0:500]],
"score" : ranking_lda_multiplicative[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_multiplicative_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr00.pickle", "rb") as f:
ranking_nmf_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d00[1][0:500]],
"score" : ranking_nmf_rwr_d00[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d00_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr01.pickle", "rb") as f:
ranking_nmf_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d01[1][0:500]],
"score" : ranking_nmf_rwr_d01[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr02.pickle", "rb") as f:
ranking_nmf_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d02[1][0:500]],
"score" : ranking_nmf_rwr_d02[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr03.pickle", "rb") as f:
ranking_nmf_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d03[1][0:500]],
"score" : ranking_nmf_rwr_d03[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d03_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr05.pickle", "rb") as f:
ranking_nmf_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d05[1][0:500]],
"score" : ranking_nmf_rwr_d05[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d05_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr08.pickle", "rb") as f:
ranking_nmf_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d08[1][0:500]],
"score" : ranking_nmf_rwr_d08[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d08_top500.csv")
with open("../../rankings/ranking_lda_45_rwr00.pickle", "rb") as f:
ranking_lda_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d00[1][0:500]],
"score" : ranking_lda_rwr_d00[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d00_top500.csv")
with open("../../rankings/ranking_lda_45_rwr01.pickle", "rb") as f:
ranking_lda_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d01[1][0:500]],
"score" : ranking_lda_rwr_d01[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d01_top500.csv")
with open("../../rankings/ranking_lda_45_rwr02.pickle", "rb") as f:
ranking_lda_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d02[1][0:500]],
"score" : ranking_lda_rwr_d02[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d02_top500.csv")
with open("../../rankings/ranking_lda_45_rwr03.pickle", "rb") as f:
ranking_lda_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d03[1][0:500]],
"score" : ranking_lda_rwr_d03[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d03_top500.csv")
with open("../../rankings/ranking_lda_45_rwr05.pickle", "rb") as f:
ranking_lda_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d05[1][0:500]],
"score" : ranking_lda_rwr_d05[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d05_top500.csv")
with open("../../rankings/ranking_lda_45_rwr08.pickle", "rb") as f:
ranking_lda_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d08[1][0:500]],
"score" : ranking_lda_rwr_d08[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d08_top500.csv")
```
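Each of the cells above repeats the same load-and-export pattern, so the same outputs could be produced with a small helper; the sketch below is an illustrative refactor (the function name and its reliance on the already-loaded `repository_info` dataframe are assumptions, not part of the original notebook):
```
def export_top_pairs(ranking_path, out_path, top_n=500):
    """Load one pickled ranking (idx1, idx2, scores) and write its top_n pairs to CSV."""
    with open(ranking_path, "rb") as f:
        ranking = pickle.load(f)
    pd.DataFrame({
        "repo_url1": repository_info.url.values[ranking[0][:top_n]],
        "repo_description1": repository_info.description.values[ranking[0][:top_n]],
        "repo_url2": repository_info.url.values[ranking[1][:top_n]],
        "repo_description2": repository_info.description.values[ranking[1][:top_n]],
        "score": ranking[2][:top_n],
    }).to_csv(out_path)

# Example: export_top_pairs("../../rankings/ranking_nmf_45_mult.pickle",
#                           "../../data/evaluation/user_study/nmf_multiplicative_top500.csv")
```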
## Top 6 repositories $r_2$ for the top 100 repositories $r_1$
```
with open("../../rankings/ranking_nmf_45_mult.pickle", "rb") as f:
ranking_nmf_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_multiplicative[1][0:500]],
"score" : ranking_nmf_multiplicative[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_multiplicative_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_mult.pickle", "rb") as f:
ranking_lda_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_multiplicative[1][0:500]],
"score" : ranking_lda_multiplicative[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_multiplicative_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr00.pickle", "rb") as f:
ranking_nmf_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d00[1][0:500]],
"score" : ranking_nmf_rwr_d00[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d00_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr01.pickle", "rb") as f:
ranking_nmf_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d01[1][0:500]],
"score" : ranking_nmf_rwr_d01[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr02.pickle", "rb") as f:
ranking_nmf_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d02[1][0:500]],
"score" : ranking_nmf_rwr_d02[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr03.pickle", "rb") as f:
ranking_nmf_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d03[1][0:500]],
"score" : ranking_nmf_rwr_d03[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d03_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr05.pickle", "rb") as f:
ranking_nmf_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d05[1][0:500]],
"score" : ranking_nmf_rwr_d05[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d05_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr08.pickle", "rb") as f:
ranking_nmf_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d08[1][0:500]],
"score" : ranking_nmf_rwr_d08[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d08_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr00.pickle", "rb") as f:
ranking_lda_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d00[1][0:500]],
"score" : ranking_lda_rwr_d00[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d00_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr01.pickle", "rb") as f:
ranking_lda_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d01[1][0:500]],
"score" : ranking_lda_rwr_d01[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d01_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr02.pickle", "rb") as f:
ranking_lda_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d02[1][0:500]],
"score" : ranking_lda_rwr_d02[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d02_top_6_per_repo.csv")
with open("..rankings/ranking_lda_45_rwr03.pickle", "rb") as f:
ranking_lda_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d03[1][0:500]],
"score" : ranking_lda_rwr_d03[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d03_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr05.pickle", "rb") as f:
ranking_lda_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d05[1][0:500]],
"score" : ranking_lda_rwr_d05[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d05_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr08.pickle", "rb") as f:
ranking_lda_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d08[1][0:500]],
"score" : ranking_lda_rwr_d08[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d08_top_6_per_repo.csv")
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import pickle
repository_info = pd.read_csv("../../data/processed/final_repo_english_whatwhy.csv")
with open("../../rankings/ranking_nmf_45_mult.pickle", "rb") as f:
ranking_nmf_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_multiplicative[1][0:500]],
"score" : ranking_nmf_multiplicative[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_multiplicative_top500.csv")
with open("../../rankings/ranking_lda_45_mult.pickle", "rb") as f:
ranking_lda_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_multiplicative[1][0:500]],
"score" : ranking_lda_multiplicative[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_multiplicative_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr00.pickle", "rb") as f:
ranking_nmf_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d00[1][0:500]],
"score" : ranking_nmf_rwr_d00[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d00_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr01.pickle", "rb") as f:
ranking_nmf_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d01[1][0:500]],
"score" : ranking_nmf_rwr_d01[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr02.pickle", "rb") as f:
ranking_nmf_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d02[1][0:500]],
"score" : ranking_nmf_rwr_d02[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr03.pickle", "rb") as f:
ranking_nmf_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d03[1][0:500]],
"score" : ranking_nmf_rwr_d03[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d03_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr05.pickle", "rb") as f:
ranking_nmf_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d05[1][0:500]],
"score" : ranking_nmf_rwr_d05[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d05_top500.csv")
with open("../../rankings/ranking_nmf_45_rwr08.pickle", "rb") as f:
ranking_nmf_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d08[1][0:500]],
"score" : ranking_nmf_rwr_d08[2][0:500]
}
).to_csv("../../data/evaluation/user_study/nmf_rwr_d08_top500.csv")
with open("../../rankings/ranking_lda_45_rwr00.pickle", "rb") as f:
ranking_lda_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d00[1][0:500]],
"score" : ranking_lda_rwr_d00[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d00_top500.csv")
with open("../../rankings/ranking_lda_45_rwr01.pickle", "rb") as f:
ranking_lda_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d01[1][0:500]],
"score" : ranking_lda_rwr_d01[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d01_top500.csv")
with open("../../rankings/ranking_lda_45_rwr02.pickle", "rb") as f:
ranking_lda_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d02[1][0:500]],
"score" : ranking_lda_rwr_d02[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d02_top500.csv")
with open("../../rankings/ranking_lda_45_rwr03.pickle", "rb") as f:
ranking_lda_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d03[1][0:500]],
"score" : ranking_lda_rwr_d03[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d03_top500.csv")
with open("../../rankings/ranking_lda_45_rwr05.pickle", "rb") as f:
ranking_lda_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d05[1][0:500]],
"score" : ranking_lda_rwr_d05[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d05_top500.csv")
with open("../../rankings/ranking_lda_45_rwr08.pickle", "rb") as f:
ranking_lda_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d08[1][0:500]],
"score" : ranking_lda_rwr_d08[2][0:500]
}
).to_csv("../../data/evaluation/user_study/lda_rwr_d08_top500.csv")
with open("../../rankings/ranking_nmf_45_mult.pickle", "rb") as f:
ranking_nmf_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_multiplicative[1][0:500]],
"score" : ranking_nmf_multiplicative[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_multiplicative_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_mult.pickle", "rb") as f:
ranking_lda_multiplicative = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_multiplicative[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_multiplicative[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_multiplicative[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_multiplicative[1][0:500]],
"score" : ranking_lda_multiplicative[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_multiplicative_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr00.pickle", "rb") as f:
ranking_nmf_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d00[1][0:500]],
"score" : ranking_nmf_rwr_d00[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d00_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr01.pickle", "rb") as f:
ranking_nmf_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d01[1][0:500]],
"score" : ranking_nmf_rwr_d01[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d01_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr02.pickle", "rb") as f:
ranking_nmf_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d02[1][0:500]],
"score" : ranking_nmf_rwr_d02[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d02_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr03.pickle", "rb") as f:
ranking_nmf_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d03[1][0:500]],
"score" : ranking_nmf_rwr_d03[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d03_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr05.pickle", "rb") as f:
ranking_nmf_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d05[1][0:500]],
"score" : ranking_nmf_rwr_d05[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d05_top_6_per_repo.csv")
with open("../../rankings/ranking_nmf_45_rwr08.pickle", "rb") as f:
ranking_nmf_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_nmf_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_nmf_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_nmf_rwr_d08[1][0:500]],
"score" : ranking_nmf_rwr_d08[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/nmf_rwr_d08_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr00.pickle", "rb") as f:
ranking_lda_rwr_d00 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d00[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d00[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d00[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d00[1][0:500]],
"score" : ranking_lda_rwr_d00[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d00_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr01.pickle", "rb") as f:
ranking_lda_rwr_d01 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d01[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d01[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d01[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d01[1][0:500]],
"score" : ranking_lda_rwr_d01[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d01_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr02.pickle", "rb") as f:
ranking_lda_rwr_d02 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d02[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d02[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d02[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d02[1][0:500]],
"score" : ranking_lda_rwr_d02[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d02_top_6_per_repo.csv")
with open("..rankings/ranking_lda_45_rwr03.pickle", "rb") as f:
ranking_lda_rwr_d03 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d03[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d03[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d03[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d03[1][0:500]],
"score" : ranking_lda_rwr_d03[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d03_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr05.pickle", "rb") as f:
ranking_lda_rwr_d05 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d05[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d05[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d05[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d05[1][0:500]],
"score" : ranking_lda_rwr_d05[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d05_top_6_per_repo.csv")
with open("../../rankings/ranking_lda_45_rwr08.pickle", "rb") as f:
ranking_lda_rwr_d08 = pickle.load(f)
pd.DataFrame(
{
"repo_url1" : repository_info.url.values[ranking_lda_rwr_d08[0][0:500]],
"repo_description1" : repository_info.description.values[ranking_lda_rwr_d08[0][0:500]],
"repo_url2" : repository_info.url.values[ranking_lda_rwr_d08[1][0:500]],
"repo_description2" : repository_info.description.values[ranking_lda_rwr_d08[1][0:500]],
"score" : ranking_lda_rwr_d08[2][0:500]
}
).groupby("repo_url1").head(6).reset_index(drop=True).to_csv("../../data/evaluation/user_study/lda_rwr_d08_top_6_per_repo.csv")
| 0.325521 | 0.589775 |
```
%load_ext nb_black
from IPython.core.debugger import set_trace
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
module_path = os.path.abspath(os.path.join(".."))
if module_path not in sys.path:
sys.path.append(module_path)
plt.style.use(style="seaborn")
%matplotlib inline
```
#### Read CSV
```
df = pd.read_csv(
f"../data/IMDb Movies.csv", encoding="ISO-8859-1", error_bad_lines=False
)
```
#### K-means
K-means clustering is a clustering method that aims to partition N observations into K clusters in which each observation belongs to the cluster with the nearest cluster center (cluster centroid).
The standard k-means algorithm is only applicable to numeric data and isn't directly applicable to categorical data for various reasons.
One reason is that, because the sample space for categorical data is discrete, a Euclidean distance function on such a space is not meaningful.

How does it work?
If we were given the centroids, we would assign each observation to the closest centroid.
But since we aren't given the centroids' locations, we start by placing the centroids randomly in the feature space and labelling the observations (assigning each to its closest centroid).
Then we update the locations of the centroids and label the observations again, and we repeat this procedure until the centroids stop moving.

```
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
df.head(2)
df.dtypes
```
Let's start with a small set of numeric features
```
features = ["duration", "year", "votes", "reviews_from_users"]
# Select the numeric features
data = df[features].copy()
data.fillna(-999, inplace=True)
data = data.values
print(data)
```
Scale the input feature
```
scaler = StandardScaler()
data = scaler.fit_transform(data)
print(data)
```
Since we don't have prior information about this dataset, we don't know how many clusters best split this data.
We therefore need to find the optimal number of clusters.
For that we can compute the model's inertia, which is the sum of squared distances between each observation and its closest centroid.
By default, scikit-learn's KMeans runs the initialization several times (`n_init`) and keeps the run with the lowest inertia.
So theoretically the best number of clusters could be defined as the model with the lowest inertia, right?
Not quite: the more clusters we have, the closer each instance will be to its closest centroid and the lower the inertia will be.
If we had as many clusters as observations, the inertia would be 0, which doesn't help at all.
So we instead select the number of clusters using the elbow method.
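As a side note, the `KMeans.score` method used in the next cell returns the *negative* inertia, so the curve below is the mirror image of the usual inertia-vs-k plot. The inertia is also available directly on a fitted model (illustrative snippet, assuming `data` has already been scaled as above):
```
km = KMeans(n_clusters=4).fit(data)
print(km.inertia_)     # sum of squared distances to the closest centroid
print(km.score(data))  # essentially -km.inertia_ on the training data
```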
```
%%time
n_cluster = range(1, 21)
kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster]
scores = [kmeans[i].score(data) for i in range(len(kmeans))]
f = plt.figure(1, figsize=(16,6))
plt.plot(scores)
_ = plt.xticks(n_cluster)
```
We plot the explained variation as a function of the number of clusters, and pick the *elbow* of the curve as the number of clusters to use.
Another option to get the best number of clusters is to use a silhouette score. But it's computationally expensive and we'll stick to the elbow method for practical reasons.
```
%%time
from sklearn.metrics import silhouette_score
kmeans = KMeans(n_clusters=4).fit_predict(data)
print(silhouette_score(data, kmeans))
```
Get the best number of clusters based on the elbow method: pick the last point where the difference between successive (normalized) scores is smaller than their 90th percentile.
```
scores = np.array(scores)
dif_scores = scores / scores[0]
dif_scores = np.diff(dif_scores)
n_clusters = np.argwhere(dif_scores < np.quantile(dif_scores, 0.9))[-1][0]
print(n_clusters)
```
Let's check the first observation
```
preds = KMeans(n_clusters=n_clusters).fit_predict(data)
print(f"First observation's cluster: {preds[0]}")
df = pd.DataFrame(df)
df["cluster"] = preds
outliers = [
x
for x in zip(
df["cluster"].value_counts().iloc[-2:].index,
df["cluster"].value_counts().iloc[-2:].values,
)
]
df["cluster"].value_counts()
# first item in tuple is the predicted cluster, the second is the number of observations
outliers
# keep only the clusters whose number of observations is below the 10% quantile
clusters = [
x[0]
for x in outliers
if x[1] < np.quantile(df["cluster"].value_counts().values, 0.10)
]
clusters
for c in clusters:
print(df[df.cluster == c][features])
df[features].describe()
f = plt.figure(1, figsize=(16, 6))
_ = plt.hist(df[features].values, bins=20)
f = plt.figure(1, figsize=(16, 6))
_ = plt.boxplot(df[features].values)
```
|
github_jupyter
|
%load_ext nb_black
from IPython.core.debugger import set_trace
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
module_path = os.path.abspath(os.path.join(".."))
if module_path not in sys.path:
sys.path.append(module_path)
plt.style.use(style="seaborn")
%matplotlib inline
df = pd.read_csv(
f"../data/IMDb Movies.csv", encoding="ISO-8859-1", error_bad_lines=False
)
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
df.head(2)
df.dtypes
features = ["duration", "year", "votes", "reviews_from_users"]
# Select the numeric features
data = df[features].copy()
data.fillna(-999, inplace=True)
data = data.values
print(data)
scaler = StandardScaler()
data = scaler.fit_transform(data)
print(data)
%%time
n_cluster = range(1, 21)
kmeans = [KMeans(n_clusters=i).fit(data) for i in n_cluster]
scores = [kmeans[i].score(data) for i in range(len(kmeans))]
f = plt.figure(1, figsize=(16,6))
plt.plot(scores)
_ = plt.xticks(n_cluster)
%%time
from sklearn.metrics import silhouette_score
kmeans = KMeans(n_clusters=4).fit_predict(data)
print(silhouette_score(data, kmeans))
scores = np.array(scores)
dif_scores = scores / scores[0]
dif_scores = np.diff(dif_scores)
n_clusters = np.argwhere(dif_scores < np.quantile(dif_scores, 0.9))[-1][0]
print(n_clusters)
preds = KMeans(n_clusters=n_clusters).fit_predict(data)
print(f"First observation's cluster: {preds[0]}")
df = pd.DataFrame(df)
df["cluster"] = preds
outliers = [
x
for x in zip(
df["cluster"].value_counts().iloc[-2:].index,
df["cluster"].value_counts().iloc[-2:].values,
)
]
df["cluster"].value_counts()
# first item in tuple is the predicted cluster, the second is the number of observations
outliers
# keep only the clusters whose number of observations is below the 10% quantile
clusters = [
x[0]
for x in outliers
if x[1] < np.quantile(df["cluster"].value_counts().values, 0.10)
]
clusters
for c in clusters:
print(df[df.cluster == c][features])
df[features].describe()
f = plt.figure(1, figsize=(16, 6))
_ = plt.hist(df[features].values, bins=20)
f = plt.figure(1, figsize=(16, 6))
_ = plt.boxplot(df[features].values)
| 0.409457 | 0.896342 |
```
#!/usr/bin/env python
DEBUG = True
if DEBUG:
# This code only exists to help us visually inspect the images.
# It's in an `if DEBUG:` block to illustrate that we don't need it for our code to work.
import os
from PIL import Image
import numpy as np
def read_image(path):
return np.asarray(Image.open(path).convert('L'))
def write_image(image, path):
img = Image.fromarray(np.array(image), 'L')
img.save(path)
# /Users/immanuvelprathaps/Desktop/OCR_KNN/data/fashion-mnist/t10k-images-idx3-ubyte
DATA_DIR = '/Users/immanuvelprathaps/Desktop/OCR_KNN/data/'
TEST_DIR = 'test/'
DATASET = 'mnist' # `'mnist'` or `'fashion-mnist'`
TEST_DATA_FILENAME = DATA_DIR + DATASET + '/t10k-images-idx3-ubyte'
TEST_LABELS_FILENAME = DATA_DIR + DATASET + '/t10k-labels-idx1-ubyte'
TRAIN_DATA_FILENAME = DATA_DIR + DATASET + '/train-images-idx3-ubyte'
TRAIN_LABELS_FILENAME = DATA_DIR + DATASET + '/train-labels-idx1-ubyte'
def bytes_to_int(byte_data):
return int.from_bytes(byte_data, 'big')
def read_images(filename, n_max_images=None):
images = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_images = bytes_to_int(f.read(4))
if n_max_images:
n_images = n_max_images
n_rows = bytes_to_int(f.read(4))
n_columns = bytes_to_int(f.read(4))
for image_idx in range(n_images):
image = []
for row_idx in range(n_rows):
row = []
for col_idx in range(n_columns):
pixel = f.read(1)
row.append(pixel)
image.append(row)
images.append(image)
return images
def read_labels(filename, n_max_labels=None):
labels = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_labels = bytes_to_int(f.read(4))
if n_max_labels:
n_labels = n_max_labels
for label_idx in range(n_labels):
label = bytes_to_int(f.read(1))
labels.append(label)
return labels
def flatten_list(l):
return [pixel for sublist in l for pixel in sublist]
def extract_features(X):
return [flatten_list(sample) for sample in X]
def dist(x, y):
"""
Returns the Euclidean distance between vectors `x` and `y`.
"""
return sum(
[
(bytes_to_int(x_i) - bytes_to_int(y_i)) ** 2
for x_i, y_i in zip(x, y)
]
) ** (0.5)
def get_training_distances_for_test_sample(X_train, test_sample):
return [dist(train_sample, test_sample) for train_sample in X_train]
def get_most_frequent_element(l):
return max(l, key=l.count)
def knn(X_train, y_train, X_test, k=3):
y_pred = []
for test_sample_idx, test_sample in enumerate(X_test):
print(test_sample_idx, end=' ', flush=True)
training_distances = get_training_distances_for_test_sample(
X_train, test_sample
)
sorted_distance_indices = [
pair[0]
for pair in sorted(
enumerate(training_distances),
key=lambda x: x[1]
)
]
candidates = [
y_train[idx]
for idx in sorted_distance_indices[:k]
]
top_candidate = get_most_frequent_element(candidates)
y_pred.append(top_candidate)
print()
return y_pred
def get_garment_from_label(label):
return [
'T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle boot',
][label]
def main():
n_train = 12000
n_test = 2000
k = 3
print(f'Dataset: {DATASET}')
print(f'n_train: {n_train}')
print(f'n_test: {n_test}')
print(f'k: {k}')
X_train = read_images(TRAIN_DATA_FILENAME, n_train)
y_train = read_labels(TRAIN_LABELS_FILENAME, n_train)
X_test = read_images(TEST_DATA_FILENAME, n_test)
y_test = read_labels(TEST_LABELS_FILENAME, n_test)
if DEBUG:
# Write some images out just so we can see them visually.
for idx, test_sample in enumerate(X_test):
write_image(test_sample, f'{TEST_DIR}{idx}.png')
    # Load in the `our_test.png` that you drew yourself!
# X_test = [read_image(f'{DATA_DIR}our_test.png')]
# y_test = [5]
X_train = extract_features(X_train)
X_test = extract_features(X_test)
y_pred = knn(X_train, y_train, X_test, k)
accuracy = sum([
int(y_pred_i == y_test_i)
for y_pred_i, y_test_i
in zip(y_pred, y_test)
]) / len(y_test)
if DATASET == 'fashion-mnist':
garments_pred = [
get_garment_from_label(label)
for label in y_pred
]
print(f'Predicted garments: {garments_pred}')
else:
print(f'Predicted labels: {y_pred}')
print(f'Accuracy: {accuracy * 100}%')
if __name__ == '__main__':
main()
```
## Results:
##### The KNN model reaches an accuracy of 93.05% when trained on 1/5th of the MNIST dataset.
- The model can reach a better accuracy if trained on all the samples in the training set, but your system hardware should have the computational power required to process it in a reasonable time.
- It took my system about 2-3 hours to run the prediction for 1/5th of the original MNIST dataset, which you can download here: http://yann.lecun.com/exdb/mnist/
- In the next update I will upload files with changes that make the KNN model run faster; a sketch of one such speed-up is shown below.
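One common way to speed this up, shown here only as a sketch (not the promised future update), is to decode the raw pixel bytes into NumPy arrays once and compute all distances with vectorized operations instead of nested Python loops. It assumes `X_train`, `y_train`, and `X_test` are in the format produced by `extract_features` above.
```
import numpy as np

def knn_vectorized(X_train, y_train, X_test, k=3):
    # Decode the per-pixel bytes to floats once, instead of inside every distance call.
    Xtr = np.array([[int.from_bytes(p, 'big') for p in img] for img in X_train], dtype=np.float32)
    Xte = np.array([[int.from_bytes(p, 'big') for p in img] for img in X_test], dtype=np.float32)
    y_tr = np.array(y_train)
    y_pred = []
    for row in Xte:
        # Squared Euclidean distances to every training sample in one vectorized step.
        d2 = ((Xtr - row) ** 2).sum(axis=1)
        nearest = np.argpartition(d2, k)[:k]           # indices of the k smallest distances
        labels, counts = np.unique(y_tr[nearest], return_counts=True)
        y_pred.append(int(labels[np.argmax(counts)]))  # majority vote among the k neighbours
    return y_pred
```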
|
github_jupyter
|
#!/usr/bin/env python
DEBUG = True
if DEBUG:
# This code only exists to help us visually inspect the images.
# It's in an `if DEBUG:` block to illustrate that we don't need it for our code to work.
import os
from PIL import Image
import numpy as np
def read_image(path):
return np.asarray(Image.open(path).convert('L'))
def write_image(image, path):
img = Image.fromarray(np.array(image), 'L')
img.save(path)
# /Users/immanuvelprathaps/Desktop/OCR_KNN/data/fashion-mnist/t10k-images-idx3-ubyte
DATA_DIR = '/Users/immanuvelprathaps/Desktop/OCR_KNN/data/'
TEST_DIR = 'test/'
DATASET = 'mnist' # `'mnist'` or `'fashion-mnist'`
TEST_DATA_FILENAME = DATA_DIR + DATASET + '/t10k-images-idx3-ubyte'
TEST_LABELS_FILENAME = DATA_DIR + DATASET + '/t10k-labels-idx1-ubyte'
TRAIN_DATA_FILENAME = DATA_DIR + DATASET + '/train-images-idx3-ubyte'
TRAIN_LABELS_FILENAME = DATA_DIR + DATASET + '/train-labels-idx1-ubyte'
def bytes_to_int(byte_data):
return int.from_bytes(byte_data, 'big')
def read_images(filename, n_max_images=None):
images = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_images = bytes_to_int(f.read(4))
if n_max_images:
n_images = n_max_images
n_rows = bytes_to_int(f.read(4))
n_columns = bytes_to_int(f.read(4))
for image_idx in range(n_images):
image = []
for row_idx in range(n_rows):
row = []
for col_idx in range(n_columns):
pixel = f.read(1)
row.append(pixel)
image.append(row)
images.append(image)
return images
def read_labels(filename, n_max_labels=None):
labels = []
with open(filename, 'rb') as f:
_ = f.read(4) # magic number
n_labels = bytes_to_int(f.read(4))
if n_max_labels:
n_labels = n_max_labels
for label_idx in range(n_labels):
label = bytes_to_int(f.read(1))
labels.append(label)
return labels
def flatten_list(l):
return [pixel for sublist in l for pixel in sublist]
def extract_features(X):
return [flatten_list(sample) for sample in X]
def dist(x, y):
"""
Returns the Euclidean distance between vectors `x` and `y`.
"""
return sum(
[
(bytes_to_int(x_i) - bytes_to_int(y_i)) ** 2
for x_i, y_i in zip(x, y)
]
) ** (0.5)
def get_training_distances_for_test_sample(X_train, test_sample):
return [dist(train_sample, test_sample) for train_sample in X_train]
def get_most_frequent_element(l):
return max(l, key=l.count)
def knn(X_train, y_train, X_test, k=3):
y_pred = []
for test_sample_idx, test_sample in enumerate(X_test):
print(test_sample_idx, end=' ', flush=True)
training_distances = get_training_distances_for_test_sample(
X_train, test_sample
)
sorted_distance_indices = [
pair[0]
for pair in sorted(
enumerate(training_distances),
key=lambda x: x[1]
)
]
candidates = [
y_train[idx]
for idx in sorted_distance_indices[:k]
]
top_candidate = get_most_frequent_element(candidates)
y_pred.append(top_candidate)
print()
return y_pred
def get_garment_from_label(label):
return [
'T-shirt/top',
'Trouser',
'Pullover',
'Dress',
'Coat',
'Sandal',
'Shirt',
'Sneaker',
'Bag',
'Ankle boot',
][label]
def main():
n_train = 12000
n_test = 2000
k = 3
print(f'Dataset: {DATASET}')
print(f'n_train: {n_train}')
print(f'n_test: {n_test}')
print(f'k: {k}')
X_train = read_images(TRAIN_DATA_FILENAME, n_train)
y_train = read_labels(TRAIN_LABELS_FILENAME, n_train)
X_test = read_images(TEST_DATA_FILENAME, n_test)
y_test = read_labels(TEST_LABELS_FILENAME, n_test)
if DEBUG:
# Write some images out just so we can see them visually.
for idx, test_sample in enumerate(X_test):
write_image(test_sample, f'{TEST_DIR}{idx}.png')
    # Load in the `our_test.png` that you drew yourself!
# X_test = [read_image(f'{DATA_DIR}our_test.png')]
# y_test = [5]
X_train = extract_features(X_train)
X_test = extract_features(X_test)
y_pred = knn(X_train, y_train, X_test, k)
accuracy = sum([
int(y_pred_i == y_test_i)
for y_pred_i, y_test_i
in zip(y_pred, y_test)
]) / len(y_test)
if DATASET == 'fashion-mnist':
garments_pred = [
get_garment_from_label(label)
for label in y_pred
]
print(f'Predicted garments: {garments_pred}')
else:
print(f'Predicted labels: {y_pred}')
print(f'Accuracy: {accuracy * 100}%')
if __name__ == '__main__':
main()
| 0.513668 | 0.365372 |
# Lab 2: Linear Regression
Author: Seungjae Lee (이승재)
<div class="alert alert-warning">
We use low-level PyTorch to implement linear regression here. However, in most practical applications, abstractions such as <code>nn.Module</code> or <code>nn.Linear</code> are used.
</div>
## Theoretical Overview
$$ H(x) = Wx + b $$
$$ cost(W, b) = \frac{1}{m} \sum^m_{i=1} \left( H(x^{(i)}) - y^{(i)} \right)^2 $$
- $H(x)$: how we predict $y$ for a given value of $x$
- $cost(W, b)$: how well $H(x)$ predicted $y$
## Imports
```
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# For reproducibility
torch.manual_seed(1)
```
## Data
We will use fake data for this example.
```
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
print(x_train)
print(x_train.shape)
print(y_train)
print(y_train.shape)
```
By default, PyTorch uses the NCHW tensor layout (batch N, channels C, height H, width W).
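For example, a batch of images for a convolutional layer would be laid out like this (illustrative only; the 1-D data in this lab does not need it):
```
# A batch of 16 single-channel 28x28 images in NCHW layout.
images = torch.zeros(16, 1, 28, 28)
print(images.shape)  # torch.Size([16, 1, 28, 28])
```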
## Weight Initialization
```
W = torch.zeros(1, requires_grad=True)
print(W)
b = torch.zeros(1, requires_grad=True)
print(b)
```
## Hypothesis
$$ H(x) = Wx + b $$
```
hypothesis = x_train * W + b
print(hypothesis)
```
## Cost
$$ cost(W, b) = \frac{1}{m} \sum^m_{i=1} \left( H(x^{(i)}) - y^{(i)} \right)^2 $$
```
print(hypothesis)
print(y_train)
print(hypothesis - y_train)
print((hypothesis - y_train) ** 2)
cost = torch.mean((hypothesis - y_train) ** 2)
print(cost)
```
## Gradient Descent
```
optimizer = optim.SGD([W, b], lr=0.01)
optimizer.zero_grad()
cost.backward()
optimizer.step()
print(W)
print(b)
```
Let's check if the hypothesis is now better.
```
hypothesis = x_train * W + b
print(hypothesis)
cost = torch.mean((hypothesis - y_train) ** 2)
print(cost)
```
## Training with Full Code
In reality, we will be training on the dataset for multiple epochs. This can be done simply with loops.
```
# Data
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
# Initialize the model parameters
W = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# Set up the optimizer
optimizer = optim.SGD([W, b], lr=0.01)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
    # Compute H(x)
hypothesis = x_train * W + b
    # Compute the cost
cost = torch.mean((hypothesis - y_train) ** 2)
    # Improve H(x) using the cost
optimizer.zero_grad()
cost.backward()
optimizer.step()
    # Print a log every 100 epochs
if epoch % 100 == 0:
print('Epoch {:4d}/{} W: {:.3f}, b: {:.3f} Cost: {:.6f}'.format(
epoch, nb_epochs, W.item(), b.item(), cost.item()
))
```
## High-level Implementation with `nn.Module`
Remember that we had this fake data.
```
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
```
Now we build the linear regression model. In PyTorch, every model is built by inheriting from the provided `nn.Module`.
```
class LinearRegressionModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
return self.linear(x)
```
In the model's `__init__`, we define the layers the model will use. Since we are building a linear regression model here, we use `nn.Linear`. In `forward`, we specify how the model computes the output from the input.
```
model = LinearRegressionModel()
```
## Hypothesis
Now let's create the model and compute the predicted value $H(x)$.
```
hypothesis = model(x_train)
print(hypothesis)
```
## Cost
Now let's compute the cost with mean squared error (MSE). MSE is also provided out of the box by PyTorch.
```
print(hypothesis)
print(y_train)
cost = F.mse_loss(hypothesis, y_train)
print(cost)
```
## Gradient Descent
Finally, we use the computed cost to update $W$ and $b$ of $H(x)$ so that the cost decreases. For this we can use any of the optimizers in PyTorch's `torch.optim`.
```
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer.zero_grad()
cost.backward()
optimizer.step()
```
## Training with Full Code
Now that we understand the linear regression code, let's actually run it and fit the model.
```
# Data
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
# Initialize the model
model = LinearRegressionModel()
# Set up the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
    # Compute H(x)
prediction = model(x_train)
    # Compute the cost
cost = F.mse_loss(prediction, y_train)
    # Improve H(x) using the cost
optimizer.zero_grad()
cost.backward()
optimizer.step()
    # Print a log every 100 epochs
if epoch % 100 == 0:
params = list(model.parameters())
W = params[0].item()
b = params[1].item()
print('Epoch {:4d}/{} W: {:.3f}, b: {:.3f} Cost: {:.6f}'.format(
epoch, nb_epochs, W, b, cost.item()
))
```
We can see the cost decreasing as $W$ and $b$ of $H(x)$ are gradually adjusted.
|
github_jupyter
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# For reproducibility
torch.manual_seed(1)
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
print(x_train)
print(x_train.shape)
print(y_train)
print(y_train.shape)
W = torch.zeros(1, requires_grad=True)
print(W)
b = torch.zeros(1, requires_grad=True)
print(b)
hypothesis = x_train * W + b
print(hypothesis)
print(hypothesis)
print(y_train)
print(hypothesis - y_train)
print((hypothesis - y_train) ** 2)
cost = torch.mean((hypothesis - y_train) ** 2)
print(cost)
optimizer = optim.SGD([W, b], lr=0.01)
optimizer.zero_grad()
cost.backward()
optimizer.step()
print(W)
print(b)
hypothesis = x_train * W + b
print(hypothesis)
cost = torch.mean((hypothesis - y_train) ** 2)
print(cost)
# Data
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
# Initialize the model parameters
W = torch.zeros(1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
# Set up the optimizer
optimizer = optim.SGD([W, b], lr=0.01)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
    # Compute H(x)
hypothesis = x_train * W + b
    # Compute the cost
cost = torch.mean((hypothesis - y_train) ** 2)
    # Improve H(x) using the cost
optimizer.zero_grad()
cost.backward()
optimizer.step()
    # Print a log every 100 epochs
if epoch % 100 == 0:
print('Epoch {:4d}/{} W: {:.3f}, b: {:.3f} Cost: {:.6f}'.format(
epoch, nb_epochs, W.item(), b.item(), cost.item()
))
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
class LinearRegressionModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
def forward(self, x):
return self.linear(x)
model = LinearRegressionModel()
hypothesis = model(x_train)
print(hypothesis)
print(hypothesis)
print(y_train)
cost = F.mse_loss(hypothesis, y_train)
print(cost)
optimizer = optim.SGD(model.parameters(), lr=0.01)
optimizer.zero_grad()
cost.backward()
optimizer.step()
# Data
x_train = torch.FloatTensor([[1], [2], [3]])
y_train = torch.FloatTensor([[1], [2], [3]])
# Initialize the model
model = LinearRegressionModel()
# Set up the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01)
nb_epochs = 1000
for epoch in range(nb_epochs + 1):
    # Compute H(x)
prediction = model(x_train)
    # Compute the cost
cost = F.mse_loss(prediction, y_train)
    # Improve H(x) using the cost
optimizer.zero_grad()
cost.backward()
optimizer.step()
    # Print a log every 100 epochs
if epoch % 100 == 0:
params = list(model.parameters())
W = params[0].item()
b = params[1].item()
print('Epoch {:4d}/{} W: {:.3f}, b: {:.3f} Cost: {:.6f}'.format(
epoch, nb_epochs, W, b, cost.item()
))
| 0.903845 | 0.987005 |
# NY Times API Scrape
Tools in this notebook are for scraping news articles using the NY Times API. The following code will make requests to the API, scrape 1,000 of the most recent news articles, and parse each article's headline, snippet, and URL into a DataFrame.
Please visit https://developers.nytimes.com/ before using this tool to review the NY Times API terms of service, obtain your personal NY Times developer API key (free), and research any additional information relating to the use of the API.
## Imports
```
import requests
import pandas as pd
import time
```
## NY Times API Scrape
Enter your API key and the topic you wish to search for, as strings, in the cell below
```
api_key = ''
topic = 'fire'
```
This function makes multiple requests to the API and pulls 1,000 of the most recent articles (10 per request) for the topic designated above. This is the maximum the API allows for; adjust the range in the function to collect a smaller amount. The output is a list of dictionaries with all article-specific information contained in the JSON response.
```
def nytimes_api_scrape(topic, api_key):
article_list = []
# adjust range to scrape less then 1,000
for i in range(100):
# gives an update every 100 articles
if i % 10 == 0:
print('{} articles gathered so far'.format(i*10))
        url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&page=' + str(i) + '&api-key=' + api_key  # page selects which block of 10 results to fetch
res = requests.get(url)
# checks to see if request was a success and adds to list
if res.status_code == 200:
the_json = res.json()
article_list.extend(the_json['response']['docs'])
else:
            print('Bad request status {}'.format(res.status_code))
break
# intentionally delay requests to the server
time.sleep(5)
print('You gathered {} articles about {}'.format(len(article_list), topic))
return article_list
```
**Important Note:** total run time of the function is about 8.5 minutes
```
fire_articles = nytimes_api_scrape(topic, api_key)
fire_articles[0].keys()
```
This function takes the headline, snippet, and url for each article and returns a DataFrame for output
```
def to_df(article_list):
key_list = []
    for i in range(len(article_list)):
key_list.append({
'headline': article_list[i]['headline']['main'],
'snippet': article_list[i]['snippet'],
'web_url': article_list[i]['web_url']
})
df = pd.DataFrame(key_list)
return df
df = to_df(fire_articles)
```
## Output
```
# save to csv for use in the other notebook
df.to_csv('./datasets/{}'.format(topic))
```
|
github_jupyter
|
import requests
import pandas as pd
import time
api_key = ''
topic = 'fire'
def nytimes_api_scrape(topic, api_key):
article_list = []
# adjust range to scrape less then 1,000
for i in range(100):
# gives an update every 100 articles
if i % 10 == 0:
print('{} articles gathered so far'.format(i*10))
        url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&page=' + str(i) + '&api-key=' + api_key  # page selects which block of 10 results to fetch
res = requests.get(url)
# checks to see if request was a success and adds to list
if res.status_code == 200:
the_json = res.json()
article_list.extend(the_json['response']['docs'])
else:
            print('Bad request status {}'.format(res.status_code))
break
# intentionally delay requests to the server
time.sleep(5)
print('You gathered {} articles about {}'.format(len(article_list), topic))
return article_list
fire_articles = nytimes_api_scrape(topic, api_key)
fire_articles[0].keys()
def to_df(article_list):
key_list = []
    for i in range(len(article_list)):
key_list.append({
'headline': article_list[i]['headline']['main'],
'snippet': article_list[i]['snippet'],
'web_url': article_list[i]['web_url']
})
df = pd.DataFrame(key_list)
return df
df = to_df(fire_articles)
# save to csv for use in the other notebook
df.to_csv('./datasets/{}'.format(topic))
| 0.088907 | 0.8809 |
<a href="https://colab.research.google.com/github/soma2000-lang/Churn-/blob/main/employeee_churrn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import sklearn.metrics
import math
import datetime as dt
plt.style.use('ggplot')
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import lightgbm as lgb
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import classification_report
data=pd.read_csv('/Churn.csv')
data.head()
data.shape
data.head()
data.info()
data.head(2)
data['Churn'].value_counts()
data.isnull().sum()
plt.figure(figsize=(12,8))
sns.histplot(x='MonthlyCharges',data=data,hue='Churn')
sns.countplot(data=data,x='Churn',hue='Dependents')
sns.countplot(data=data,x='Churn',hue='SeniorCitizen')
sns.countplot(data=data,x='Churn',hue='gender')
plt.figure(figsize=(12,8))
sns.histplot(x='tenure',data=data,hue='Churn')
sns.countplot(data=data,x='Churn',hue='PhoneService')
sns.countplot(x='InternetService',data=data,hue='Churn')
sns.countplot(x=data['PaperlessBilling'],hue=data['Churn'])
plt.show()
data[data['TotalCharges']==' ']
Gender_Category_map = {
'Male' : 1,
'Female': 0,
}
Partner_Category_map = {
'Yes' : 1,
'No': 0,
}
Dependents_Category_map = {
'Yes' : 1,
'No': 0,
}
PhoneService_Category_map = {
'Yes' : 1,
'No': 0,
}
OnlineSecurity_Category_map = {
'Yes' : 1,
'No': 0,
}
OnlineBackup_Category_map = {
'Yes' : 1,
'No': 0,
}
DeviceProtection_Category_map = {
'Yes' : 1,
'No': 0,
}
TechSupport_Category_map = {
'Yes' : 1,
'No': 0,
}
StreamingTV_Category_map = {
'Yes' : 1,
    'No': 0,
}
StreamingMovies_Category_map = {
'Yes' : 1,
'No': 0,
}
PaperlessBilling_Category_map = {
'Yes' : 1,
'No': 0,
}
Churn_Category_map = {
'Yes' : 1,
'No': 0,
}
data.loc[:, 'gender'] = data['gender'].map(Gender_Category_map)
data.loc[:, 'Partner'] = data['Partner'].map(Partner_Category_map)
data.loc[:, 'Dependents'] = data['Dependents'].map(Dependents_Category_map)
data.loc[:, 'PhoneService'] = data['PhoneService'].map(PhoneService_Category_map)
data.loc[:, 'OnlineSecurity'] = data['OnlineSecurity'].map(OnlineSecurity_Category_map)
data.loc[:, 'OnlineBackup'] = data['OnlineBackup'].map(OnlineBackup_Category_map)
data.loc[:, 'DeviceProtection'] = data['DeviceProtection'].map(DeviceProtection_Category_map)
data.loc[:, 'TechSupport'] = data['TechSupport'].map(TechSupport_Category_map)
data.loc[:, 'StreamingMovies'] = data['StreamingMovies'].map(StreamingMovies_Category_map)
data.loc[:, 'PaperlessBilling'] = data['PaperlessBilling'].map(PaperlessBilling_Category_map)
data.loc[:, 'Churn'] = data['Churn'].map(Churn_Category_map)
data.head()
```
Use `pd.get_dummies` to convert the categorical columns into dummy/indicator variables.
```
InternetService=data['InternetService']
InternetService=pd.get_dummies(InternetService,drop_first=True)
PaymentMethod=data['PaymentMethod']
PaymentMethod=pd.get_dummies(PaymentMethod,drop_first=True)
data=data.drop(['Contract','MultipleLines','InternetService','PaymentMethod','StreamingTV'],axis=1)
data.head()
```
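Note that the dummy columns created above are not yet attached back to `data`. A minimal way to do that (an assumed next step, not part of the original notebook) would be:
```
# Hypothetical follow-up: join the one-hot encoded columns back onto the main frame.
data = pd.concat([data, InternetService, PaymentMethod], axis=1)
data.head()
```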
|
github_jupyter
|
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import sklearn.metrics
import math
import datetime as dt
plt.style.use('ggplot')
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import lightgbm as lgb
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.metrics import classification_report
data=pd.read_csv('/Churn.csv')
data.head()
data.shape
data.head()
data.info()
data.head(2)
data['Churn'].value_counts()
data.isnull().sum()
plt.figure(figsize=(12,8))
sns.histplot(x='MonthlyCharges',data=data,hue='Churn')
sns.countplot(data=data,x='Churn',hue='Dependents')
sns.countplot(data=data,x='Churn',hue='SeniorCitizen')
sns.countplot(data=data,x='Churn',hue='gender')
plt.figure(figsize=(12,8))
sns.histplot(x='tenure',data=data,hue='Churn')
sns.countplot(data=data,x='Churn',hue='PhoneService')
sns.countplot(x='InternetService',data=data,hue='Churn')
sns.countplot(x=data['PaperlessBilling'],hue=data['Churn'])
plt.show()
data[data['TotalCharges']==' ']
Gender_Category_map = {
'Male' : 1,
'Female': 0,
}
Partner_Category_map = {
'Yes' : 1,
'No': 0,
}
Dependents_Category_map = {
'Yes' : 1,
'No': 0,
}
PhoneService_Category_map = {
'Yes' : 1,
'No': 0,
}
OnlineSecurity_Category_map = {
'Yes' : 1,
'No': 0,
}
OnlineBackup_Category_map = {
'Yes' : 1,
'No': 0,
}
DeviceProtection_Category_map = {
'Yes' : 1,
'No': 0,
}
TechSupport_Category_map = {
'Yes' : 1,
'No': 0,
}
StreamingTV_Category_map = {
'Yes' : 1,
    'No': 0,
}
StreamingMovies_Category_map = {
'Yes' : 1,
'No': 0,
}
PaperlessBilling_Category_map = {
'Yes' : 1,
'No': 0,
}
Churn_Category_map = {
'Yes' : 1,
'No': 0,
}
data.loc[:, 'gender'] = data['gender'].map(Gender_Category_map)
data.loc[:, 'Partner'] = data['Partner'].map(Partner_Category_map)
data.loc[:, 'Dependents'] = data['Dependents'].map(Dependents_Category_map)
data.loc[:, 'PhoneService'] = data['PhoneService'].map(PhoneService_Category_map)
data.loc[:, 'OnlineSecurity'] = data['OnlineSecurity'].map(OnlineSecurity_Category_map)
data.loc[:, 'OnlineBackup'] = data['OnlineBackup'].map(OnlineBackup_Category_map)
data.loc[:, 'DeviceProtection'] = data['DeviceProtection'].map(DeviceProtection_Category_map)
data.loc[:, 'TechSupport'] = data['TechSupport'].map(TechSupport_Category_map)
data.loc[:, 'StreamingMovies'] = data['StreamingMovies'].map(StreamingMovies_Category_map)
data.loc[:, 'PaperlessBilling'] = data['PaperlessBilling'].map(PaperlessBilling_Category_map)
data.loc[:, 'Churn'] = data['Churn'].map(Churn_Category_map)
data.head()
InternetService=data['InternetService']
InternetService=pd.get_dummies(InternetService,drop_first=True)
PaymentMethod=data['PaymentMethod']
PaymentMethod=pd.get_dummies(PaymentMethod,drop_first=True)
data=data.drop(['Contract','MultipleLines','InternetService','PaymentMethod','StreamingTV'],axis=1)
data.head()
| 0.316898 | 0.856692 |
```
%matplotlib inline
import gym
import matplotlib
import numpy as np
import sys
from collections import defaultdict
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.blackjack import BlackjackEnv
from lib import plotting
matplotlib.style.use('ggplot')
env = BlackjackEnv()
def create_random_policy(nA):
"""
Creates a random policy function.
Args:
nA: Number of actions in the environment.
Returns:
A function that takes an observation as input and returns a vector
of action probabilities
"""
A = np.ones(nA, dtype=float) / nA
def policy_fn(observation):
return A
return policy_fn
def create_greedy_policy(Q):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(observation):
pass
# Implement this!
return policy_fn
def mc_control_importance_sampling(env, num_episodes, behavior_policy, discount_factor=1.0):
"""
    Monte Carlo Off-Policy Control using Weighted Importance Sampling.
Finds an optimal greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
behavior_policy: The behavior to follow while generating episodes.
A function that given an observation returns a vector of probabilities for each action.
discount_factor: Gamma discount factor.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities. This is the optimal greedy policy.
"""
# The final action-value function.
# A dictionary that maps state -> action values
Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # The greedy policy we want to learn
target_policy = create_greedy_policy(Q)
# Implement this!
return Q, target_policy
random_policy = create_random_policy(env.action_space.n)
Q, policy = mc_control_importance_sampling(env, num_episodes=500000, behavior_policy=random_policy)
# For plotting: Create value function from action-value function
# by picking the best action at each state
V = defaultdict(float)
for state, action_values in Q.items():
action_value = np.max(action_values)
V[state] = action_value
plotting.plot_value_function(V, title="Optimal Value Function")
```
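The two `# Implement this!` stubs above are left as an exercise. For reference, a minimal sketch of `create_greedy_policy` (an illustrative possibility, not the official solution) simply puts all probability mass on the action with the highest Q-value:
```
def create_greedy_policy(Q):
    # Reference sketch: deterministic policy that always picks argmax_a Q(s, a).
    def policy_fn(observation):
        A = np.zeros_like(Q[observation], dtype=float)
        A[np.argmax(Q[observation])] = 1.0
        return A
    return policy_fn
```
In the off-policy control loop itself, episodes generated by `behavior_policy` are typically processed backwards while maintaining a cumulative importance-sampling weight, which is then used to update `Q` incrementally.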
|
github_jupyter
|
%matplotlib inline
import gym
import matplotlib
import numpy as np
import sys
from collections import defaultdict
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.blackjack import BlackjackEnv
from lib import plotting
matplotlib.style.use('ggplot')
env = BlackjackEnv()
def create_random_policy(nA):
"""
Creates a random policy function.
Args:
nA: Number of actions in the environment.
Returns:
A function that takes an observation as input and returns a vector
of action probabilities
"""
A = np.ones(nA, dtype=float) / nA
def policy_fn(observation):
return A
return policy_fn
def create_greedy_policy(Q):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(observation):
pass
# Implement this!
return policy_fn
def mc_control_importance_sampling(env, num_episodes, behavior_policy, discount_factor=1.0):
"""
    Monte Carlo Off-Policy Control using Weighted Importance Sampling.
Finds an optimal greedy policy.
Args:
env: OpenAI gym environment.
num_episodes: Number of episodes to sample.
behavior_policy: The behavior to follow while generating episodes.
A function that given an observation returns a vector of probabilities for each action.
discount_factor: Gamma discount factor.
Returns:
A tuple (Q, policy).
Q is a dictionary mapping state -> action values.
policy is a function that takes an observation as an argument and returns
action probabilities. This is the optimal greedy policy.
"""
# The final action-value function.
# A dictionary that maps state -> action values
Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # The greedy policy we want to learn
target_policy = create_greedy_policy(Q)
# Implement this!
return Q, target_policy
random_policy = create_random_policy(env.action_space.n)
Q, policy = mc_control_importance_sampling(env, num_episodes=500000, behavior_policy=random_policy)
# For plotting: Create value function from action-value function
# by picking the best action at each state
V = defaultdict(float)
for state, action_values in Q.items():
action_value = np.max(action_values)
V[state] = action_value
plotting.plot_value_function(V, title="Optimal Value Function")
| 0.664105 | 0.667212 |
# DSCI 525 - Web and Cloud Computing
***Milestone 3:*** This milestone aims to set up your Spark cluster and develop the machine learning model you will deploy in the cloud for the next milestone.
## Milestone 3 checklist :
- [x] Setup your EMR cluster with Spark, Hadoop, JupyterEnterpriseGateway, JupyterHub 1.1.0, and Livy.
- [x] Make sure you set up foxy proxy for your web browser(Firefox). Probably you already set this up from the previous milestone.
- [x] Develop a ML model using scikit-learn. (We will be using this model to deploy for our next milestone.)
- [x] Obtain the best hyperparameter settings using spark's MLlib.
**Keep in mind:**
- _Please use the Firefox browser for this milestone. Make sure you got foxy proxy setup._
- _All services you use are in region us-west-2 region._
- _Use only default VPC and subnet, if not specified explicitly in instruction, leave all other options default when setting up your cluster._
- _No IP addresses are visible when you provide the screenshot (***Please mask it before uploading***)._
- _1 node cluster with a single master node (zero slave nodes) of size ```m5.xlarge``` is good enough for your spark MLlib process. These configurations might take 15 - 20 minutes to get optimal tuning parameters for the entire dataset._
- _Say something went wrong and you want to spin up another EMR cluster, then make sure you terminate the previous one._
- _Upon termination, stored data in your cluster will be lost. Make sure you save any data to S3 and download the notebooks to your laptop so that next time you have your jupyterHub in a different cluster, you can upload your notebook there._
_***Outside of Milestone [OPTIONAL]:*** You are encouraged to practice it yourself by spinning up EMR clusters._
***VERY IMPORTANT:*** With task 4, make sure you occasionally download the notebook to your local computer. Once the lab is stopped after 3 hours, your EMR cluster will be terminated, and everything will vanish.
### 1. Setup your EMR cluster
rubric={correctness:25}
Follow the instructions shown during the lecture to set up your EMR cluster. I am adding instructions here again for guidance.
1.1) Go to advanced options.
1.2) Choose Release 6.5.0.
1.3) Check Spark, Hadoop, JupyterEnterpriseGateway, JupyterHub 1.1.0, and Livy.
1.4) Core instances to be 0, master 1.
1.5) By default, the instance will be selected as m5.xlarge. However, you can also choose a bigger instance (e.g., m4.4xlarge), but make sure you stay within your budget.
1.6) Cluster name : Your-group-number.
1.7) Uncheck Enable auto-termination.
1.8) Select the key pair you have access to (from your milestone 2).
1.9) For the EC2 security group, please go with the default. Remember, this is a managed service; as we learned from the shared responsibility model, AWS will take care of many things. EMR comes in the list of container services. Check [this]( https://aws.amazon.com/blogs/industries/applying-the-aws-shared-responsibility-model-to-your-gxp-solution/).
1.10) Wait for the cluster to start. This takes around ~15 min. Once it is ready, you will see a solid green dot.
#### Please attach this screen shots from your group for grading
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/Task1.png

### 2. Setup your browser , jupyter environment & connect to the master node.
rubric={correctness:25}
2.1) Under cluster ```summary > Application user interfaces > On-cluster user interfaces```: Click on _***Enable an SSH Connection***_.
2.2) From instructions in the popup from Step 2.1, use: **Step 1: Open an SSH Tunnel to the Amazon EMR Master Node.** Remember you are running this from your laptop terminal, and after running, it will look like [this](https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/eg.png). For the private key make sure you point to the correct location in your computer.
2.3) (If you haven't done so from milestone 2) From instructions in the popup from Step 2.1, please ignore **Step 2: Configure a proxy management tool**. Instead follow instructions given [here](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-connect-master-node-proxy.html), under section **Example: Configure FoxyProxy for Firefox:**. Get foxy proxy standard [here](https://addons.mozilla.org/en-CA/firefox/addon/foxyproxy-standard/)
2.4) Move to the **Application user interfaces** tab and use the JupyterHub URL to access it.
2.4.1) Username: ```jovyan```, Password: ```jupyter```. These are default more details [here](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-jupyterhub-user-access.html)
2.5) Login into the master node from your laptop terminal (```cluster summary > Connect to the Master Node Using SSH```), and install the necessary packages. Here are the needed packages based on my solution; you might have to install other packages depending on your approach.
sudo yum install python3-devel
sudo pip3 install pandas
sudo pip3 install s3fs
**IMPORTANT:**
- Make sure ssh -i ~/ggeorgeAD.pem -ND 8157 hadoop@xxxxx.compute.amazonaws.com (Step 2.2) is running in your terminal window before trying to access your jupyter URL. Sometimes the connection might drop; in that case, run that step again to access your jupyterHub.
- Don't confuse Step 2.2 and Step 2.5. In 2.2, you open an ssh tunnel to access the jupyterHub URL. With Step 2.5, you log into the master node to install the necessary packages.
#### Please attach this screen shots from your group for grading
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/Task2.png

### 3. Develop a ML model using scikit-learn.
rubric={correctness:25}
You could use the setup we have from our last milestone, but it might have been shut down by AWS due to the time limit; also, we haven't got permission from AWS to spin up instances larger than t2.large. Considering the situation, I recommend doing this on your local computer: upload this notebook to your local Jupyter environment and follow the instructions.
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/Milestone3-Task3.ipynb
Task 3 notebook:
There are 2 parts to this notebook; For doing part 2, you want information from Task 4.
#### Please attach this screen shots from your group for grading
Your S3 bucket showing ```model.joblib``` file.

### 4. Obtain best hyperparameter settings using spark's MLlib.
rubric={correctness:20}
Upload this notebook to your jupyterHub (AWS managed jupyterHub in the cluster) you set up in Task 2 and follow the instructions given in the notebook.
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/Milestone3-Task4.ipynb
### 5. Submission instructions
rubric={mechanics:5}
***SUBMISSION:*** Please put a link to your GitHub folder on Canvas where TAs can find the following:
- [x] Python 3 notebook, with the code for ML model in scikit-learn. (You can develop this on your local computer)
- [x] PySpark notebook, with the code for obtaining the best hyperparameter settings. (For this, you have to use the PySpark notebook (kernel) in your EMR cluster.)
- [x] Screenshots from:
- [x] Setup your EMR cluster (Task 1).
- [x] Setup your browser, jupyter environment & connect to the master node (Task 2).
- [x] Your S3 bucket showing ```model.joblib``` file. (From Task 3 Develop a ML model using scikit-learn)
|
github_jupyter
|
# DSCI 525 - Web and Cloud Computing
***Milestone 3:*** This milestone aims to set up your spark cluster and develop your machine learning model to deploy in the cloud for the next milestone.
## Milestone 3 checklist :
- [x] Setup your EMR cluster with Spark, Hadoop, JupyterEnterpriseGateway, JupyterHub 1.1.0, and Livy.
- [x] Make sure you set up foxy proxy for your web browser (Firefox). You probably already set this up in the previous milestone.
- [x] Develop a ML model using scikit-learn. (We will be using this model to deploy for our next milestone.)
- [x] Obtain the best hyperparameter settings using spark's MLlib.
**Keep in mind:**
- _Please use the Firefox browser for this milestone. Make sure you got foxy proxy setup._
- _All services you use are in the us-west-2 region._
- _Use only the default VPC and subnet; if not specified explicitly in the instructions, leave all other options at their defaults when setting up your cluster._
- _Make sure no IP addresses are visible when you provide the screenshots (***Please mask them before uploading***)._
- _1 node cluster with a single master node (zero slave nodes) of size ```m5.xlarge``` is good enough for your spark MLlib process. These configurations might take 15 - 20 minutes to get optimal tuning parameters for the entire dataset._
- _If something goes wrong and you want to spin up another EMR cluster, make sure you terminate the previous one first._
- _Upon termination, stored data in your cluster will be lost. Make sure you save any data to S3 and download the notebooks to your laptop so that next time you have your jupyterHub in a different cluster, you can upload your notebook there._
_***Outside of Milestone [OPTIONAL]:*** You are encouraged to practice it yourself by spinning up EMR clusters._
***VERY IMPORTANT:*** With task 4, make sure you occasionally download the notebook to your local computer. Once the lab is stopped after 3 hours, your EMR cluster will be terminated, and everything will vanish.
### 1. Setup your EMR cluster
rubric={correctness:25}
Follow the instructions shown during the lecture to set up your EMR cluster. I am adding instructions here again for guidance.
1.1) Go to advanced options.
1.2) Choose Release 6.5.0.
1.3) Check Spark, Hadoop, JupyterEnterpriseGateway, JupyterHub 1.1.0, and Livy.
1.4) Core instances to be 0, master 1.
1.5) By default, the instance will be selected as m5.xlarge. However, you can also choose a bigger instance (e.g., m4.4xlarge), but keep an eye on your budget.
1.6) Cluster name : Your-group-number.
1.7) Uncheck Enable auto-termination.
1.8) Select the key pair you have access to (from your milestone 2).
1.9) For the EC2 security group, please go with the default. Remember, this is a managed service; as we learned from the shared responsibility model, AWS will take care of many things. EMR comes in the list of container services. Check [this](https://aws.amazon.com/blogs/industries/applying-the-aws-shared-responsibility-model-to-your-gxp-solution/).
1.10) Wait for the cluster to start. This takes around ~15 min. Once it is ready, you will see a solid green dot.
#### Please attach these screenshots from your group for grading
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/Task1.png

### 2. Setup your browser, jupyter environment & connect to the master node.
rubric={correctness:25}
2.1) Under cluster ```summary > Application user interfaces > On-cluster user interfaces```: Click on _***Enable an SSH Connection***_.
2.2) From instructions in the popup from Step 2.1, use: **Step 1: Open an SSH Tunnel to the Amazon EMR Master Node.** Remember you are running this from your laptop terminal, and after running, it will look like [this](https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/eg.png). For the private key make sure you point to the correct location in your computer.
2.3) (If you haven't done so from milestone 2) From instructions in the popup from Step 2.1, please ignore **Step 2: Configure a proxy management tool**. Instead follow instructions given [here](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-connect-master-node-proxy.html), under section **Example: Configure FoxyProxy for Firefox:**. Get foxy proxy standard [here](https://addons.mozilla.org/en-CA/firefox/addon/foxyproxy-standard/)
2.4) Move to the **application user interfaces** tab and use the jupyterHub URL to access it.
2.4.1) Username: ```jovyan```, Password: ```jupyter```. These are the defaults; more details [here](https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-jupyterhub-user-access.html)
2.5) Log in to the master node from your laptop terminal (```cluster summary > Connect to the Master Node Using SSH```), and install the necessary packages. Here are the packages needed for my solution; you might have to install other packages depending on your approach.
sudo yum install python3-devel
sudo pip3 install pandas
sudo pip3 install s3fs
**IMPORTANT:**
- Make sure `ssh -i ~/ggeorgeAD.pem -ND 8157 hadoop@xxxxx.compute.amazonaws.com` (Step 2.2) is running in your terminal window before trying to access your jupyter URL. Sometimes the connection might drop; in that case, run that step again to access your jupyterHub.
- Don't confuse Step 2.2 and Step 2.5. In 2.2, you open an SSH tunnel to access the jupyterHub URL. In Step 2.5, you log into the master node to install the necessary packages.
#### Please attach these screenshots from your group for grading
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/images/Task2.png

### 3. Develop a ML model using scikit-learn.
rubric={correctness:25}
You can use the setup that we have from our last milestone, but it might have been shut down by AWS due to the time limit; also, we haven't got permission from AWS to spin up instances larger than t2.large. Considering the situation, I recommend doing this on your local computer, so upload this notebook to your local jupyter notebook and follow the instructions.
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/Milestone3-Task3.ipynb
Task 3 notebook:
There are 2 parts to this notebook; for part 2, you will need information from Task 4. (A minimal example of saving the trained model to S3 is sketched below.)
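If you are unsure how to get the trained model into your bucket at the end of part 1, here is a minimal sketch. It assumes a fitted scikit-learn estimator named `model` and a placeholder bucket name `your-bucket`; the Task 3 notebook may do this differently, so treat it only as orientation.
```
import joblib
import s3fs

# Serialize the fitted scikit-learn model and write it straight to S3.
# "your-bucket" and the key prefix are placeholders; use your group's bucket.
fs = s3fs.S3FileSystem()
with fs.open("your-bucket/output/model.joblib", "wb") as f:
    joblib.dump(model, f)
```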
#### Please attach these screenshots from your group for grading
Your S3 bucket showing ```model.joblib``` file.

### 4. Obtain best hyperparameter settings using spark's MLlib.
rubric={correctness:20}
Upload this notebook to your jupyterHub (AWS managed jupyterHub in the cluster) you set up in Task 2 and follow the instructions given in the notebook.
https://github.ubc.ca/mds-2021-22/DSCI_525_web-cloud-comp_students/blob/master/release/milestone3/Milestone3-Task4.ipynb
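For orientation, a minimal sketch of what a Spark MLlib hyperparameter search typically looks like is shown below. It assumes a prepared Spark DataFrame `train_df` with a `features` vector column and a `label` column; the actual Task 4 notebook defines its own estimator, grid, and data.
```
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder

# Sketch only: `train_df` is assumed to have been prepared earlier in the notebook.
rf = RandomForestRegressor(featuresCol="features", labelCol="label")
grid = (ParamGridBuilder()
        .addGrid(rf.numTrees, [10, 50, 100])
        .addGrid(rf.maxDepth, [5, 10])
        .build())
cv = CrossValidator(estimator=rf,
                    estimatorParamMaps=grid,
                    evaluator=RegressionEvaluator(labelCol="label", metricName="rmse"),
                    numFolds=3)
cv_model = cv.fit(train_df)
print(cv_model.avgMetrics)   # RMSE for each parameter combination
print(cv_model.bestModel)    # the winning model / settings
```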
### 5. Submission instructions
rubric={mechanics:5}
***SUBMISSION:*** Please put a link to your GitHub folder on Canvas where TAs can find the following:
- [x] Python 3 notebook, with the code for ML model in scikit-learn. (You can develop this on your local computer)
- [x] PySpark notebook, with the code for obtaining the best hyperparameter settings. (For this, you have to use the PySpark notebook (kernel) in your EMR cluster.)
- [x] Screenshots from:
- [x] Setup your EMR cluster (Task 1).
- [x] Setup your browser, jupyter environment & connect to the master node (Task 2).
- [x] Your S3 bucket showing ```model.joblib``` file. (From Task 3 Develop a ML model using scikit-learn)
| 0.676834 | 0.85567 |
# Deep Q-Network (DQN)
---
In this notebook, you will implement a DQN agent with OpenAI Gym's LunarLander-v2 environment.
### 1. Import the Necessary Packages
```
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
```
### 2. Instantiate the Environment and Agent
Initialize the environment in the code cell below.
```
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
```
Please refer to the instructions in `Deep_Q_Network.ipynb` if you would like to write your own DQN agent. Otherwise, run the code cell below to load the solution files.
```
from dqn_agent import Agent
agent = Agent(state_size=8, action_size=4, seed=0)
# watch an untrained agent
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
```
### 3. Train the Agent with DQN
Run the code cell below to train the agent from scratch. You are welcome to amend the supplied values of the parameters in the function, to try to see if you can get better performance!
Alternatively, you can skip to the next step below (**4. Watch a Smart Agent!**), to load the saved model weights from a pre-trained agent.
```
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=200.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
scores = dqn()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
```
### 4. Watch a Smart Agent!
In the next code cell, you will load the trained weights from file to watch a smart agent!
```
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
for i in range(3):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
```
### 5. Explore
In this exercise, you have implemented a DQN agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:
- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task with discrete actions!
- You may like to implement some improvements such as prioritized experience replay, Double DQN, or Dueling DQN! (A minimal sketch of the Double DQN target is given just after this list.)
- Write a blog post explaining the intuition behind the DQN algorithm and demonstrating how to use it to solve an RL environment of your choosing.
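For example, switching to a Double DQN only changes how the target value is computed. The sketch below assumes a local/target network pair like the `qnetwork_local` used above plus a `qnetwork_target` (the target network is not shown in this notebook, so treat the names as placeholders); it would replace the target computation inside the agent's learning step.
```
import torch

def double_dqn_targets(qnetwork_local, qnetwork_target, rewards, next_states, dones, gamma=0.99):
    # Compute Double DQN targets for a batch sampled from the replay buffer.
    with torch.no_grad():
        # Choose the greedy next action with the online (local) network...
        best_actions = qnetwork_local(next_states).argmax(dim=1, keepdim=True)
        # ...but evaluate that action with the target network.
        q_next = qnetwork_target(next_states).gather(1, best_actions)
    # Terminal transitions contribute only the immediate reward.
    return rewards + gamma * q_next * (1 - dones)
```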
|
github_jupyter
|
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
from dqn_agent import Agent
agent = Agent(state_size=8, action_size=4, seed=0)
# watch an untrained agent
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=200.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
scores = dqn()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
for i in range(3):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
| 0.608478 | 0.953923 |
# How to prepare and transform data using Python
**In this lesson we will show what to do when you receive a _dirty_ database**:
- Removing rows that contain such data;
- Replacing specific values with an average value that does not affect the analysis.
**We will also learn how to Normalize and Standardize data**:
- Data Normalization and Standardization
<a name="intro"></a>
### Table of contents
1. [Removing strange data](#git1)
2. [Replacing values](#git2)
3. [Data Normalization and Standardization](#git3)
    1. [Normalization](#git3.1)
    2. [Standardization](#git3.2)
4. [Binarizing the data](#git4)
5. [Transforming nominal variables into integers](#git5)
6. [One-hot encoding: transforming values into binary codes](#git6)
7. [Correlating data](#git7)
8. [Imbalanced data: how to detect, analyze and balance](#git8)
9. [Principal Component Analysis (PCA)](#git9)
10. [Boxplot: detecting, displaying and discarding _outliers_](#git10)
When you receive a dataset, you will probably find errors, inconsistencies and duplicated information. Before performing any statistical analysis or applying algorithms, we must clean the collected database.
To read and manipulate tables, we will use the _pandas_ library, which can easily be installed with the command _pip install pandas_.
The database will be a CSV file (which can also be opened in Excel and other spreadsheet readers).
We will also use the matplotlib library, one of the most widely used for visualizing data through plots.
Let's import the libraries!
```
import random
random.seed(1)
import pandas as pd
import matplotlib.pyplot as plt
```
### 1. Removing strange data <a name="git1"></a> [🠡](#intro)
Now, let's assign the "iris-with-errors.csv" data to the variable _data_, print the number of rows and columns using the shape command, and display the first 25 rows (including the header).
```
data = pd.read_csv('dados/iris-with-errors.csv', header=(0))
print("Linha, coluna:", data.shape)
data.head(25)
```
We can see some errors:
1. The "?" character
2. The expression _NaN_, when the computer does not know what type the information is
3. In the _species_ column, it is indicated whether the rows are duplicated
We need to review the data or discard it. Here, we will choose the second option and use pandas for that.
```
data = data.dropna() # Remove os dados NaN
data.duplicated() # Verifica se há dados duplicados
```
We can see in the _Output_ above that the duplicated rows received the state **True**. Of two identical rows, only the one that comes after the first is the "duplicate". Let's discard those rows!
```
data = data.drop_duplicates()
data.head(25)
```
Now we just need to remove the question marks "?". One way to do this is to transform that character into a _NaN_, and then once again clean the rows containing _NaN_.
```
import numpy as np
data = data.replace('?', np.nan) # Usa o comando replace para substituir as interrogações em NaN's
data = data.dropna() # Remove as linhas que contenham NaN
data.head(25)
```
With the data clean, we no longer need the labels in the **species** column, right? Let's remove it!
```
print("Vamos remover a coluna:", data.columns[4]) # Para exibir as colunas que serão removidas, use o comando .columns
data = data.drop(data.columns[4], axis=1) # Nesse comando, precisamos indicar o axis=1 que significa coluna
data.head(2) # Pra verificarmos se a coluna foi removida, podemos visualizar apenas poucas linhas.
```
If any other data point stands out and you want to directly remove a chosen row, we can use the command above with **axis=0**, indicating the row numbers and the axis type.
```
print("Vamos remover as linhas:", data.index[[0, 2]]) # Para exibir as linhas que serão removidas, use o comando .index
data = data.drop(data.index[[0, 2]], axis=0)
data.head(25)
```
Done! We removed rows _[0, 2]_, that is, the first and third rows, with index labels **0** and **6**.
### 2. Replacing values <a name="git2"></a> [🠡](#intro)
If missing values are found, we can easily replace them. We will now use the variable _data_ausente_, which refers to the same initial data.
```
data_ausente = pd.read_csv('dados/iris-with-errors.csv', header=(0))
print(data_ausente.shape) # Número de linhas e colunas, lembra?
data_ausente.head(50) # Opa! Eu quero agora exibir 50 linhas, o que será que vai acontecer?
```
Now we feel bad about discarding the data, since each row still has usable information. A smart approach is to replace the "?" and "NaN" values with an **average value**, that is, the value that would be most likely in that position. This option is good for this case, since we do not have many rows of data, and removing them would mean less information.
```
import numpy as np
data_ausente = data_ausente.replace('?', np.nan) # Transformamos os "?" em NaN
# Vamos usar o comando abaixo para transformar as linhas e colunas em formato Numpy (em Arrays)
X = np.array(data_ausente[data_ausente.columns[0:data_ausente.shape[1]-1]], dtype = float) # Também ignoramos a última coluna
averages = np.nanmean(X, axis = 0) # Usamos a função nanmean que calcula a média (ou mediana em alguns casos) ignorando os Nan
for i in np.arange(0, X.shape[0]):
for j in np.arange(0, X.shape[1]):
if(np.isnan(X[i,j]) == True): # Vamos verificar se é um dado NaN
X[i,j] = averages[j] # Inserimos a média
print(X) # Exibimos o Array que foi construído, calculado e alterado
# Documentação - https://docs.scipy.org/doc/numpy/reference/generated/numpy.nanmean.html
```
### 3. Data Normalization and Standardization <a name="git3"></a> [🠡](#intro)
#### 3.1 Normalization <a name="git3.1"></a> [🠡](#intro)
**Normalization** is the method in which we take the largest value in the table and transform it into **1**, and the smallest into **0**. The values in between are **normalized** accordingly. For that we will use the sklearn library, which already does this for us.
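In formula form, this is exactly what `MinMaxScaler` with `feature_range=(0, 1)` computes for each column, where $x_{min}$ and $x_{max}$ are the column minimum and maximum:

$$x' = \frac{x - x_{min}}{x_{max} - x_{min}}$$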
We import the _iris.csv_ database and assign it to the variable _data_normalizada_, and import the _numpy_ and _sklearn_ libraries. We transform the table into an Array as before, ignoring the last column.
**Attention!!**: Don't forget to install the new library using the command _pip install scikit-learn_.
```
data_normalizada = pd.read_csv('dados/iris.csv', header=(0))
import numpy as np
from sklearn.preprocessing import MinMaxScaler
X = np.array(data_normalizada[data_normalizada.columns[0:data.shape[1]-1]]) # Transformação em Array ignorando última coluna
for i in range(X.shape[1]):
print("MAIOR valor da coluna", i, "=", max(X[:,i]))
print("MENOR Valor da coluna", i, "=", min(X[:,i]))
# prepara a função para transformar os dados
scaler = MinMaxScaler(feature_range=(0, 1)) # O mínimo e o máximo aqui será "0" e "1"
# Realiza a normalização e coloca em um novo vetor
X_norm = scaler.fit_transform(X) # A variável X_norm será a matriz criada através do comando .scaler
print(X_norm)
print('Mínimo dos atributos:', np.amin(X_norm, axis=0))
print('Máximo dos atributos:', np.amax(X_norm, axis=0))
```
#### 3.2 Standardization <a name="git3.2"></a> [🠡](#intro)
Data **Standardization** has the same goal as **Normalization**: to bring all the data to a common scale. The difference is that in Standardization, **the mean equals 0** and **the standard deviation equals 1**.
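In formula form, this is what `StandardScaler` computes, where $\mu$ is the column mean and $\sigma$ its standard deviation:

$$z = \frac{x - \mu}{\sigma}$$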
Let's use the **sklearn** library to standardize our data!
```
from sklearn.preprocessing import StandardScaler
data = pd.read_csv('dados/iris.csv', header=(0))
data_matriz = np.array(data[data.columns[0:data.shape[1]-1]]) # arquivo CSV transformado em matriz (array)
padronizador = StandardScaler().fit(data_matriz) # média = 0, desvio padrão = 1
matriz_padronizada = padronizador.transform(data_matriz)
print(matriz_padronizada)
```
It is interesting to note that we now have **negative** values in our standardized matrix. This happens because **the mean equals zero**, in addition to the standard deviation being equal to **one**.
Let's compute the mean of each column of our standardized matrix!
```
for i in np.arange(0,matriz_padronizada.shape[1]):
print('A Média da coluna', i, 'é:', np.mean(matriz_padronizada[:,i]))
print('O desvio padrão da coluna', i, 'é:', np.std(matriz_padronizada[:,i]), '\n')
```
We can interpret **e-16** as being **very close to zero**.
### 4. Binarizing the data <a name="git4"></a> [🠡](#intro)
We use binary data for cases where a certain value = 0 and another value = 1, usually indicating "no" and "yes", or "off" and "on".
When receiving blood test data from several people, we would like to know whether each person is **iron deficient**. For that, we will say that:
1. Iron deficient if ferritin is **below** 30 micrograms/L;
2. Not deficient if ferritin is **above** 30 micrograms/L.
Note that **not being iron deficient** does NOT mean the person's level is normal, since a large amount of iron can indicate another kind of disease.
Therefore, the values will be **binarized**:
1. Iron deficient will be **0**;
2. Not iron deficient will be **1**.
Let's **create a fictitious table** using the built-in **csv** library.
Then we will transform the data using the **sklearn** library and its **Binarizer** and **MinMaxScaler** classes:
```
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import MinMaxScaler
import csv
#Criando uma tabela com dados fictícios
with open('dados/dados_ferritina_sangue.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Código", "Ferritina em microgramas\L"])
writer.writerow([32.8, "PACIENTE A"])
writer.writerow([30.1, "PACIENTE B"])
writer.writerow([29.9, "PACIENTE C"])
writer.writerow([27.6, "PACIENTE D"])
writer.writerow([64.3, "PACIENTE E"])
writer.writerow([45.1, "PACIENTE F"])
data_ferritina = pd.read_csv('dados/dados_ferritina_sangue.csv', header=(0), encoding = "ISO-8859-1")
# Transformando a tabela em matriz (array) e considerando apenas os atributos da coluna 1.
X = np.array(data_ferritina[data_ferritina.columns[0:1]])
T = 30 # valor do Limiar / Treshold
print('Limiar:', T)
print("-------------------")
# Binarização dos dados considerando o limiar T (threshhold)
binarizer = Binarizer(threshold=T).fit(X) #.fit(X_padronizado)
X_binarizado = binarizer.transform(X) #(X_padronizado)
for i in np.arange(0, X.shape[0]):
print("Antes:", X[i,])
print("Depois:", X_binarizado[i, ])
print("-------------------")
```
### 5. Transforming nominal variables into integers <a name="git5"></a> [🠡](#intro)
This one is simpler. If you want to transform some kind of nominal variable into integers, just indicate **the term to be replaced** and **what it will be turned into**:
```
data = pd.read_csv('dados/iris.csv', header=(0))
classes = np.unique(data[data.columns[-1]])
number = 0 # valor que a classe será transformada
for i in classes:
data = data.replace(i, number) # cada classe corresponderá a um valor, respectivamente
number = number + 1 # esperamos que seja setosa = 0, versicolor = 1 e virginica = 2
classes_novas = np.unique(data[data.columns[-1]])
print("Dados antigos:", classes)
print("Novos dados:", classes_novas)
```
It is also possible to do the reverse process! Let's take the blood ferritin data and, if the number is less than 30, it will be transformed into **DEFICIENTE EM FERRITINA**. If it is greater than or equal to 30, **NÃO DEFICIENTE EM FERRITINA**.
```
data_ferritina = pd.read_csv('dados/dados_ferritina_sangue.csv', header=(0), encoding = "ISO-8859-1")
classes_ferritina = np.unique(data_ferritina[data_ferritina.columns[0]])
deficiente = "DEFICIENTE EM FERRITINA" # valor STRING em que a classe será transformada
nao_deficiente = "NÃO DEFICIENTE EM FERRITINA" # valor STRING em que a classe será transformada
number = 30.0
print(data_ferritina)
for i in classes_ferritina:
if i < 30:
data_ferritina = data_ferritina.replace(i, deficiente)
else:
data_ferritina = data_ferritina.replace(i, nao_deficiente)
classes_ferritina_novas = np.unique(data_ferritina[data_ferritina.columns[0]])
#print("\nDados antigos:", classes_ferritina,)
#print("Novos dados:", classes_ferritina_novas)
print("\nAgora a tabela ficou assim:\n")
print(data_ferritina)
```
### 6. One-hot encoding: transforming values into binary codes <a name="git6"></a> [🠡](#intro)
**One-hot encoding** is another way of working with binary values. It can be done automatically with a few lines of code. Basically, it counts the number of distinct values and assigns a binary code using more than one column.
With the example, it will be much clearer:
```
import pandas as pd
# Vamos criar um DataFrame com o pandas
df = pd.DataFrame ({'A':['a', 'b', 'c', 'd'],}) # Podemos contar QUATRO variáveis diferentes aqui: a, b, c, d
df.head()
df = pd.get_dummies(df) # Vamos usar o one-hot encoding
df.head()
```
See how we transformed the different values into **binary codes**?
- **a** is now 1 0 0 0
- **d** is now 0 0 0 1
### 7. Correlating data <a name="git7"></a> [🠡](#intro)
**Correlating data** means identifying, among variables, columns or any other kind of attribute, **which attributes have higher or lower correlation**, and measuring it.
As an example, let's correlate the data in the well-known **BostonHousing** dataset, which relates houses to prices.
```
data_housing = pd.read_csv('dados/BostonHousing.csv', header=(0))
data_housing.head(5)
corr = data_housing.corr() # corr é o método de correlação do Pandas. Acabamos de correlacionar os dados!
# Daqui para baixo, estaremos gerando o gráfico de correlação
plt.figure(figsize=(7,7))
plt.imshow(corr, cmap='Blues', interpolation='none', aspect='auto') # imshow exibe data como uma imagem
plt.colorbar() # gera a barra do lado direito
# Vamos incluir o nome de todas as variáveis
plt.xticks(range(len(corr)), corr.columns, rotation='vertical') # adiciona as variáveis na linha X
plt.yticks(range(len(corr)), corr.columns); # adiciona as variáveis na linha Y
plt.suptitle('Correlation between variables', fontsize=15, fontweight='bold') # adiciona um título
plt.grid(False)
plt.show()
```
Reading the plot may not be that intuitive. If you don't know how yet, it is quite simple! The correlation is being measured from **0** to **1**, as the color bar on the right shows.
In that same bar, we can see that:
- The **lighter** the color, the **less correlation** between variables x and y, that is, the correlation approaches **0**;
- The **darker** the color, the **more correlation** between variables x and y, that is, the correlation approaches **1**.
We generated a plot to make it easier to understand what is going on in this _dataset_, but note that the data is stored in a matrix:
```
print(corr)
```
Did you notice that the **maximum value** is **1**? This happens because the method uses the **Pearson correlation**. Here is the description from Wikipedia:
In descriptive statistics, the Pearson correlation coefficient, also called the "product-moment correlation coefficient" or simply "Pearson's ρ", measures the degree of correlation (and the direction of that correlation, whether positive or negative) between two metric-scale variables (interval or ratio).
This coefficient, usually denoted ρ, only takes values between -1 and 1:
- **ρ = 1** means a **perfect positive correlation** between the two variables;
- **ρ = -1** means a **perfect negative correlation** between the two variables. **If one increases, the other always decreases**;
- **ρ = 0** means the two variables **do not depend linearly on each other**. However, a non-linear dependence may exist. Thus, the result **ρ = 0** should be investigated by other means."
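For reference, Pearson's coefficient (the default method of pandas' `corr()`) divides the covariance of the two variables by the product of their standard deviations:

$$\rho_{X,Y} = \frac{\mathrm{cov}(X, Y)}{\sigma_X \, \sigma_Y}$$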
Now let's identify which variables have the **highest correlation**:
```
p = 0.75 # Essa é a correlação mínima que estamos considerando
var = []
for i in corr.columns: # percorre toda a tabela para cara elemento "i"
for j in corr.columns: # percorre novamente toda a tabela para cada elemento "j"
if(i != j):
if np.abs(corr[i][j]) > p: # Se a correlação for maior que "p"
var.append([i,j]) # Coloca na lista "var"
print('As variáveis com maior correlação:\n', var)
```
This kind of method is very important because it also helps us filter the _dataset_ better. If two attributes are highly correlated, one of them can be removed, since they may carry very similar meaning and interfere with other data.
### 8. Imbalanced data: how to detect, analyze and balance <a name="git8"></a> [🠡](#intro)
In a data sample, some of the data may be **imbalanced**. This happens when the _dataset_ has different numbers of elements per class. The **greater the gap** between class counts, the **greater the imbalance**. Let's understand this better with the next example.
To determine how many classes a _dataset_ has, we just count over the class column. In the _dataset_ below, the class column is the last one, which indicates the vehicle type of the corresponding row.
```
import pandas as pd
data_veiculo = pd.read_csv('dados/Vehicle.csv', header=(0))
data_veiculo.head(10)
coluna_class = data_veiculo[data_veiculo.columns[-1]] # Vamos colocar a coluna "class" em uma variável
print(coluna_class)
```
Now let's split the class column by element and count the occurrences of each one.
```
tipos_de_classe = np.unique(coluna_class) # Vamos pegar o nome dos diferentes elementos da coluna "class"
print(tipos_de_classe)
print(tipos_de_classe[2])
# Vamos pegar o número de ocorrência de cada uma das quatro diferentes classes guardadas na variável acima
numero_de_classes = np.zeros(len(tipos_de_classe)) # pega qual a quantidade de classes diferentes, que é quatro
for i in np.arange(0, len(tipos_de_classe)): # para os elementos "i" de "0" até "quantidade de classes diferentes"
a = coluna_class == tipos_de_classe[i] # "a" é variável da coluna_class, que é equivalente ao tipo de classe "i"
numero_de_classes[i] = len(coluna_class[a]) # quantidade de aparição do elemento "i"
print(numero_de_classes)
```
A good way to analyze this visually is with a **Histogram**, which will show the number of occurrences per element (per vehicle type).
```
import matplotlib.pyplot as plt
numbers = np.arange(0, len(tipos_de_classe))
plt.bar(numbers, numero_de_classes, alpha=.75)
# Agora vamos exibir o nome das classes ao invés de exibir números
plt.xticks(numbers, tipos_de_classe)
plt.title('Número de elementos por classe')
plt.show(True)
```
Looking at the number of elements per class and at the Histogram above, we can conclude that **the classes are well distributed**, with little distance between the counts.
Even so, we can still redistribute the data to obtain a **perfect balance**. The result will be each class having the same number of elements.
```
N = 3 # Vamos selecionar apenas três ocorrências de cada classe e seus respectivos atributos.
# classes
cl = np.unique(coluna_class) #pega o número de diferentes classes
X = np.array(data_veiculo) #transforma o dataset em uma matriz/array
Xnew = [] # este será o array que receberá appends, por enquanto está vazio
cls = np.array(data_veiculo[data_veiculo.columns[-1]])
for i in np.arange(0, len(cl)): # de 0 até o tamanho(número de diferentes classes)
a = np.argwhere(cls == cl[i])
inds = np.random.choice(a[:,0], N, replace=False) # Aleatoriamente, seleciona um elemento "a" qualquer "N" vezes
Xnew.append(X[inds,:]) # append do valor armazenado na variável inds
Xnew = np.array(Xnew) # transforma Xnew em array estruturado
print('Dados obtidos a partir da amostragem')
print(Xnew)
```
### 9. Principal Component Analysis (PCA) <a name="git9"></a> [🠡](#intro)
**Principal Component Analysis (PCA)** involves **matrix spectrum** theory, which goes beyond basic linear algebra. Since it strays far from the analyses covered in this guide, the theory will not be presented FOR NOW.
It is used to analyze data by eliminating overlaps and keeping the most representative forms of the data.
Let's perform this analysis with just a few lines of code! The most interesting part is the graphical result.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('dados/Iris.csv', header=(0))
coluna_class = np.unique(data[data.columns[-1]]) # Vamos guardar a última coluna, 'Class'
print(data.shape) # Através desse print, vemos que possui 150 linhas e 5 colunas
list_labels = list(data.columns)
data.head(10)
data = data.to_numpy() # Transformando o dataset em formato NumPy
nrow,ncol = data.shape
y = data[:,-1]
X = data[:,0:ncol-1] # '-1' para ignorar a última coluna, que é a de Classes
```
Let's **standardize** the data using **sklearn**:
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
```
Now let's finally perform **PCA**:
```
from sklearn.decomposition import PCA
pca = PCA(n_components=2) # Número de componentes
pca_resultado = pca.fit_transform(X)
# Vamos alterar os atributos do gráfico e exibi-lo utilizando o matplotlib
plt.figure(figsize=(8,5))
plt.scatter(pca_resultado[:,0], pca_resultado[:,1], s=50, color = 'blue') # primeiro '0' e segundo eixo '1'
plt.xlabel("Primeiro componente", fontsize=20)
plt.ylabel("Segundo componente", fontsize=20)
plt.xticks(color='k', size=20)
plt.yticks(color='k', size=20)
plt.show(True)
# Agora vamos colorir de acordo com a classe dos dados, o que facilita muito na hora de analisar!
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
aux = 0
plt.figure(figsize=(8,5))
for c in coluna_class:
nodes = np.where(y == c)
plt.scatter(pca_resultado[nodes,0], pca_resultado[nodes,1], s=50, color = colors[aux], label = c)
aux = aux + 1
plt.legend()
plt.xlabel("Primeiro componente", fontsize=20)
plt.ylabel("Segundo componente", fontsize=20)
plt.xticks(color='k', size=20)
plt.yticks(color='k', size=20)
plt.show(True)
```
Through the **PCA** analysis, it becomes easy to see how close or far apart the classes are:
1. Setosa is very different from versicolor and virginica;
2. Versicolor and virginica are different;
3. Between versicolor and virginica, some values overlap.
### 10. Boxplot: detecting, displaying and discarding _outliers_ <a name="git10"></a> [🠡](#intro)
Let's check whether there are **outliers** in our _datasets_! For that, we will use the **Boxplot** type of plot.
**Outliers** are data points that differ significantly from the majority. They can greatly distort the calculation of the **Mean** of a dataset.
To generate the **Boxplot**, a few elements are needed, which will only be computed in the next lesson!
1. The **Median** of a dataset;
2. The **1st and 3rd Quartiles** of a dataset;
3. An arbitrary Minimum and Maximum value. Anything outside this range is considered an **outlier**.
Let's import the "iris.csv" _dataset_ and display its Boxplot.
```
import seaborn as sns # É uma biblioteca de exibição gráfica
import pandas as pd
import matplotlib.pyplot as plt
data_iris = pd.read_csv('dados/iris.csv', header=(0))
print(data_iris) # Vamos exibir o print para compararmos com o gráfico
plt.figure(figsize=(8, 8))
# Vamos alterar a exibição dos elementos do Boxplot e exibi-lo
sns.boxplot(x="species", y="petal_length", data=data_iris) # É aqui que definimos o que será o atributo "x" e o "y"
plt.xlabel('Espécie', fontsize=18)
plt.ylabel('Comprimento da pétala', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show(True)
```
Notice how we generated the Boxplot of the **petal_length** variable for each of the classes in **species**. The line of code that made that definition was this one:
``sns.boxplot(x="species", y="petal_length", data=data_iris) # É aqui que definimos o que será o atributo "x" e o "y"``
In the next lesson, we will learn much more about how the calculations work. Here we are just showing an easy way to find this data and display it graphically.
Now let's compute the interquartile range (Q3 - Q1) to find the **outliers** and display them graphically!
```
import pandas as pd
# Vamos gerar dados de forma randômica valores de '1' a '200' para as variáveis 'a', 'b' e 'c'
data = pd.DataFrame({'a': np.random.randint(1, 200, 20),
'b': np.random.randint(1, 200, 20),
'c': np.random.randint(1, 200, 20)})
# Vamos agora gerar alguns outliers. Os valores acima de 150 serão multiplicados em 10.
data[data > 150] = data[data > 150]*10
print(data)
```
Let's display a plot of the created _dataset_ using the **seaborn** library.
```
import seaborn
seaborn.pairplot(data)
plt.show()
```
Now let's detect the outliers using the **interquartile range** (**IQR**).
```
Q1 = data.quantile(0.25) #primeiro quartil, ou seja 25% do dataset
Q3 = data.quantile(0.75) #terceiro quartil, ou seja, 75% do dataset
IQR = Q3 - Q1
print(((data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))))
```
True marks the values **smaller than (Q1 - 1.5 * IQR) or greater than (Q3 + 1.5 * IQR)**, that is, the **outlier** elements, which fall outside the limits of the **Boxplot** seen earlier. The Boxplot **excludes** outlier data from the analysis so it does not interfere with the graphical display and reading.
Again, if you still have questions, don't worry: we will cover **outlier** data in the next lesson.
Finally, **let's delete** the rows that contain any outlier value, so that it does not interfere with other kinds of analysis:
```
v = ((data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))).any(axis=1) # axis=1 significa linha, axis=0 significa coluna
data = data.drop(data.index[list(v)], axis=0) # Vamos deletar os valores 'v' utilizando data.drop
data.head(10)
```
Notice how only the rows in which every attribute was classified as **False** for outlier remain.
**Basically, these are the most commonly used ways of displaying and handling data in Data Science.** See you in the next guide!
|
github_jupyter
|
import random
random.seed(1)
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('dados/iris-with-errors.csv', header=(0))
print("Linha, coluna:", data.shape)
data.head(25)
data = data.dropna() # Remove os dados NaN
data.duplicated() # Verifica se há dados duplicados
data = data.drop_duplicates()
data.head(25)
import numpy as np
data = data.replace('?', np.nan) # Usa o comando replace para substituir as interrogações em NaN's
data = data.dropna() # Remove as linhas que contenham NaN
data.head(25)
print("Vamos remover a coluna:", data.columns[4]) # Para exibir as colunas que serão removidas, use o comando .columns
data = data.drop(data.columns[4], axis=1) # Nesse comando, precisamos indicar o axis=1 que significa coluna
data.head(2) # Pra verificarmos se a coluna foi removida, podemos visualizar apenas poucas linhas.
print("Vamos remover as linhas:", data.index[[0, 2]]) # Para exibir as linhas que serão removidas, use o comando .index
data = data.drop(data.index[[0, 2]], axis=0)
data.head(25)
data_ausente = pd.read_csv('dados/iris-with-errors.csv', header=(0))
print(data_ausente.shape) # Número de linhas e colunas, lembra?
data_ausente.head(50) # Opa! Eu quero agora exibir 50 linhas, o que será que vai acontecer?
import numpy as np
data_ausente = data_ausente.replace('?', np.nan) # Transformamos os "?" em NaN
# Vamos usar o comando abaixo para transformar as linhas e colunas em formato Numpy (em Arrays)
X = np.array(data_ausente[data_ausente.columns[0:data_ausente.shape[1]-1]], dtype = float) # Também ignoramos a última coluna
averages = np.nanmean(X, axis = 0) # Usamos a função nanmean que calcula a média (ou mediana em alguns casos) ignorando os Nan
for i in np.arange(0, X.shape[0]):
for j in np.arange(0, X.shape[1]):
if(np.isnan(X[i,j]) == True): # Vamos verificar se é um dado NaN
X[i,j] = averages[j] # Inserimos a média
print(X) # Exibimos o Array que foi construído, calculado e alterado
# Documentação - https://docs.scipy.org/doc/numpy/reference/generated/numpy.nanmean.html
data_normalizada = pd.read_csv('dados/iris.csv', header=(0))
import numpy as np
from sklearn.preprocessing import MinMaxScaler
X = np.array(data_normalizada[data_normalizada.columns[0:data.shape[1]-1]]) # Transformação em Array ignorando última coluna
for i in range(X.shape[1]):
print("MAIOR valor da coluna", i, "=", max(X[:,i]))
print("MENOR Valor da coluna", i, "=", min(X[:,i]))
# prepara a função para transformar os dados
scaler = MinMaxScaler(feature_range=(0, 1)) # O mínimo e o máximo aqui será "0" e "1"
# Realiza a normalização e coloca em um novo vetor
X_norm = scaler.fit_transform(X) # A variável X_norm será a matriz criada através do comando .scaler
print(X_norm)
print('Mínimo dos atributos:', np.amin(X_norm, axis=0))
print('Máximo dos atributos:', np.amax(X_norm, axis=0))
from sklearn.preprocessing import StandardScaler
data = pd.read_csv('dados/iris.csv', header=(0))
data_matriz = np.array(data[data.columns[0:data.shape[1]-1]]) # arquivo CSV transformado em matriz (array)
padronizador = StandardScaler().fit(data_matriz) # média = 0, desvio padrão = 1
matriz_padronizada = padronizador.transform(data_matriz)
print(matriz_padronizada)
for i in np.arange(0,matriz_padronizada.shape[1]):
print('A Média da coluna', i, 'é:', np.mean(matriz_padronizada[:,i]))
print('O desvio padrão da coluna', i, 'é:', np.std(matriz_padronizada[:,i]), '\n')
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import MinMaxScaler
import csv
#Criando uma tabela com dados fictícios
with open('dados/dados_ferritina_sangue.csv', 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["Código", "Ferritina em microgramas\L"])
writer.writerow([32.8, "PACIENTE A"])
writer.writerow([30.1, "PACIENTE B"])
writer.writerow([29.9, "PACIENTE C"])
writer.writerow([27.6, "PACIENTE D"])
writer.writerow([64.3, "PACIENTE E"])
writer.writerow([45.1, "PACIENTE F"])
data_ferritina = pd.read_csv('dados/dados_ferritina_sangue.csv', header=(0), encoding = "ISO-8859-1")
# Transformando a tabela em matriz (array) e considerando apenas os atributos da coluna 1.
X = np.array(data_ferritina[data_ferritina.columns[0:1]])
T = 30 # valor do Limiar / Treshold
print('Limiar:', T)
print("-------------------")
# Binarização dos dados considerando o limiar T (threshhold)
binarizer = Binarizer(threshold=T).fit(X) #.fit(X_padronizado)
X_binarizado = binarizer.transform(X) #(X_padronizado)
for i in np.arange(0, X.shape[0]):
print("Antes:", X[i,])
print("Depois:", X_binarizado[i, ])
print("-------------------")
data = pd.read_csv('dados/iris.csv', header=(0))
classes = np.unique(data[data.columns[-1]])
number = 0 # valor que a classe será transformada
for i in classes:
data = data.replace(i, number) # cada classe corresponderá a um valor, respectivamente
number = number + 1 # esperamos que seja setosa = 0, versicolor = 1 e virginica = 2
classes_novas = np.unique(data[data.columns[-1]])
print("Dados antigos:", classes)
print("Novos dados:", classes_novas)
data_ferritina = pd.read_csv('dados/dados_ferritina_sangue.csv', header=(0), encoding = "ISO-8859-1")
classes_ferritina = np.unique(data_ferritina[data_ferritina.columns[0]])
deficiente = "DEFICIENTE EM FERRITINA" # valor STRING em que a classe será transformada
nao_deficiente = "NÃO DEFICIENTE EM FERRITINA" # valor STRING em que a classe será transformada
number = 30.0
print(data_ferritina)
for i in classes_ferritina:
if i < 30:
data_ferritina = data_ferritina.replace(i, deficiente)
else:
data_ferritina = data_ferritina.replace(i, nao_deficiente)
classes_ferritina_novas = np.unique(data_ferritina[data_ferritina.columns[0]])
#print("\nDados antigos:", classes_ferritina,)
#print("Novos dados:", classes_ferritina_novas)
print("\nAgora a tabela ficou assim:\n")
print(data_ferritina)
import pandas as pd
# Vamos criar um DataFrame com o pandas
df = pd.DataFrame ({'A':['a', 'b', 'c', 'd'],}) # Podemos contar QUATRO variáveis diferentes aqui: a, b, c, d
df.head()
df = pd.get_dummies(df) # Vamos usar o one-hot encoding
df.head()
data_housing = pd.read_csv('dados/BostonHousing.csv', header=(0))
data_housing.head(5)
corr = data_housing.corr() # corr é o método de correlação do Pandas. Acabamos de correlacionar os dados!
# Daqui para baixo, estaremos gerando o gráfico de correlação
plt.figure(figsize=(7,7))
plt.imshow(corr, cmap='Blues', interpolation='none', aspect='auto') # imshow exibe data como uma imagem
plt.colorbar() # gera a barra do lado direito
# Vamos incluir o nome de todas as variáveis
plt.xticks(range(len(corr)), corr.columns, rotation='vertical') # adiciona as variáveis na linha X
plt.yticks(range(len(corr)), corr.columns); # adiciona as variáveis na linha Y
plt.suptitle('Correlation between variables', fontsize=15, fontweight='bold') # adiciona um título
plt.grid(False)
plt.show()
print(corr)
p = 0.75 # Essa é a correlação mínima que estamos considerando
var = []
for i in corr.columns: # percorre toda a tabela para cara elemento "i"
for j in corr.columns: # percorre novamente toda a tabela para cada elemento "j"
if(i != j):
if np.abs(corr[i][j]) > p: # Se a correlação for maior que "p"
var.append([i,j]) # Coloca na lista "var"
print('As variáveis com maior correlação:\n', var)
import pandas as pd
data_veiculo = pd.read_csv('dados/Vehicle.csv', header=(0))
data_veiculo.head(10)
coluna_class = data_veiculo[data_veiculo.columns[-1]] # Vamos colocar a coluna "class" em uma variável
print(coluna_class)
tipos_de_classe = np.unique(coluna_class) # Vamos pegar o nome dos diferentes elementos da coluna "class"
print(tipos_de_classe)
print(tipos_de_classe[2])
# Vamos pegar o número de ocorrência de cada uma das quatro diferentes classes guardadas na variável acima
numero_de_classes = np.zeros(len(tipos_de_classe)) # pega qual a quantidade de classes diferentes, que é quatro
for i in np.arange(0, len(tipos_de_classe)): # para os elementos "i" de "0" até "quantidade de classes diferentes"
a = coluna_class == tipos_de_classe[i] # "a" é variável da coluna_class, que é equivalente ao tipo de classe "i"
numero_de_classes[i] = len(coluna_class[a]) # quantidade de aparição do elemento "i"
print(numero_de_classes)
import matplotlib.pyplot as plt
numbers = np.arange(0, len(tipos_de_classe))
plt.bar(numbers, numero_de_classes, alpha=.75)
# Agora vamos exibir o nome das classes ao invés de exibir números
plt.xticks(numbers, tipos_de_classe)
plt.title('Número de elementos por classe')
plt.show(True)
N = 3 # Vamos selecionar apenas três ocorrências de cada classe e seus respectivos atributos.
# classes
cl = np.unique(coluna_class) #pega o número de diferentes classes
X = np.array(data_veiculo) #transforma o dataset em uma matriz/array
Xnew = [] # este será o array que receberá appends, por enquanto está vazio
cls = np.array(data_veiculo[data_veiculo.columns[-1]])
for i in np.arange(0, len(cl)): # de 0 até o tamanho(número de diferentes classes)
a = np.argwhere(cls == cl[i])
inds = np.random.choice(a[:,0], N, replace=False) # Aleatoriamente, seleciona um elemento "a" qualquer "N" vezes
Xnew.append(X[inds,:]) # append do valor armazenado na variável inds
Xnew = np.array(Xnew) # transforma Xnew em array estruturado
print('Dados obtidos a partir da amostragem')
print(Xnew)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('dados/Iris.csv', header=(0))
coluna_class = np.unique(data[data.columns[-1]]) # Vamos guardar a última coluna, 'Class'
print(data.shape) # Através desse print, vemos que possui 150 linhas e 5 colunas
list_labels = list(data.columns)
data.head(10)
data = data.to_numpy() # Transformando o dataset em formato NumPy
nrow,ncol = data.shape
y = data[:,-1]
X = data[:,0:ncol-1] # '-1' para ignorar a última coluna, que é a de Classes
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
from sklearn.decomposition import PCA
pca = PCA(n_components=2) # Número de componentes
pca_resultado = pca.fit_transform(X)
# Vamos alterar os atributos do gráfico e exibi-lo utilizando o matplotlib
plt.figure(figsize=(8,5))
plt.scatter(pca_resultado[:,0], pca_resultado[:,1], s=50, color = 'blue') # primeiro '0' e segundo eixo '1'
plt.xlabel("Primeiro componente", fontsize=20)
plt.ylabel("Segundo componente", fontsize=20)
plt.xticks(color='k', size=20)
plt.yticks(color='k', size=20)
plt.show(True)
# Agora vamos colorir de acordo com a classe dos dados, o que facilita muito na hora de analisar!
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
aux = 0
plt.figure(figsize=(8,5))
for c in coluna_class:
nodes = np.where(y == c)
plt.scatter(pca_resultado[nodes,0], pca_resultado[nodes,1], s=50, color = colors[aux], label = c)
aux = aux + 1
plt.legend()
plt.xlabel("Primeiro componente", fontsize=20)
plt.ylabel("Segundo componente", fontsize=20)
plt.xticks(color='k', size=20)
plt.yticks(color='k', size=20)
plt.show(True)
import seaborn as sns # É uma biblioteca de exibição gráfica
import pandas as pd
import matplotlib.pyplot as plt
data_iris = pd.read_csv('dados/iris.csv', header=(0))
print(data_iris) # Vamos exibir o print para compararmos com o gráfico
plt.figure(figsize=(8, 8))
# Vamos alterar a exibição dos elementos do Boxplot e exibi-lo
sns.boxplot(x="species", y="petal_length", data=data_iris) # É aqui que definimos o que será o atributo "x" e o "y"
plt.xlabel('Espécie', fontsize=18)
plt.ylabel('Comprimento da pétala', fontsize=16)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show(True)
import pandas as pd
# Vamos gerar dados de forma randômica valores de '1' a '200' para as variáveis 'a', 'b' e 'c'
data = pd.DataFrame({'a': np.random.randint(1, 200, 20),
'b': np.random.randint(1, 200, 20),
'c': np.random.randint(1, 200, 20)})
# Vamos agora gerar alguns outliers. Os valores acima de 150 serão multiplicados em 10.
data[data > 150] = data[data > 150]*10
print(data)
import seaborn
seaborn.pairplot(data)
plt.show()
Q1 = data.quantile(0.25) #primeiro quartil, ou seja 25% do dataset
Q3 = data.quantile(0.75) #terceiro quartil, ou seja, 75% do dataset
IQR = Q3 - Q1
print(((data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))))
v = ((data < (Q1 - 1.5 * IQR)) | (data > (Q3 + 1.5 * IQR))).any(axis=1) # axis=1 significa linha, axis=0 significa coluna
data = data.drop(data.index[list(v)], axis=0) # Vamos deletar os valores 'v' utilizando data.drop
data.head(10)
| 0.358016 | 0.95297 |
# DeepDream
A TensorFlow implementation of DeepDream, using a pretrained ResNet-50 v2.
## References:
1. [github: L1aoXingyu/Deep-Dream](https://github.com/L1aoXingyu/Deep-Dream)
2. [github: llSourcell/deep_dream_challenge](https://github.com/llSourcell/deep_dream_challenge)
3. [Official ResNet Model of Tensorflow](https://github.com/tensorflow/models/tree/master/official/resnet)
## Preparation
```
import numpy as np
import PIL.Image
from IPython.display import Image, display
import tensorflow as tf
import matplotlib.pyplot as plt
import urllib.request
import os
import tarfile
```
Download and extract the ResNet-v2 model (from Tensorflow Model Zoo)
```
url = 'http://download.tensorflow.org/models/official/20181001_resnet/savedmodels/resnet_v2_fp32_savedmodel_NHWC.tar.gz'
data_dir = './model/'
model_name = os.path.split(url)[-1]
local_tar_file = os.path.join(data_dir, model_name)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if not os.path.exists(local_tar_file):
# Download
model_url = urllib.request.urlopen(url)
with open(local_tar_file, 'wb') as output:
output.write(model_url.read())
with tarfile.open(local_tar_file, 'r') as t:
t.extractall(data_dir)
```
Download the picture
```
pic_url = 'https://github.com/Carlxiao/AIClub-2018Fall/raw/master/experiment/deepdream/pic/doge.jpg'
with open('pic.jpg', 'wb') as f:
f.write(urllib.request.urlopen(pic_url).read())
```
## Defining Functions
### Utility functions
```
def restore_graph_saved_model(model_path):
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], model_path)
input_tensor = graph.get_tensor_by_name('input_tensor:0')
return graph, input_tensor, sess
def showarray(a, show=True):
a = np.uint8(np.clip(a, 0, 255))
fig = plt.figure(frameon=False)
plt.axis('off')
if show:
plt.imshow(a)
plt.show()
else:
w, h = a.shape[:2]
dpi = 200
fig.set_size_inches(w / dpi, h / dpi)
plt.imshow(a)
plt.savefig('saved.jpg', dpi=dpi, bbox_inches='tight')
plt.close()
```
### DeepDream
Since the input tensor of the pretrained model has a fixed shape of 64x224x224x3, we have to use the trick of calculating the gradients *tile by tile* (each tile is of size 224x224), and image scaling is needed to draw a decent result.
Another important trick is to generate the detail of the dream iteratively, while the image resolution goes from low to high. For each iteration, we (scale and) add the detail we have so far to an image (octave) of some resolution, and perform gradient ascent on it. Then we get richer detail by subtracting the original octave from the result of the gradient ascent.
```
def deepdream(graph, input_tensor, sess, target, image,
n_iter=30, rate=2.5, n_octave=6, octave_scale=1.4,
image_scale_mul=1.0):
def resize(img, size):
img = tf.expand_dims(img, 0)
op = tf.image.resize_bilinear(img, size)[0,:,:,:]
return sess.run(op)
def calc_grad_tiled(img, grad_tensor, tile_size=224):
sz = tile_size
h, w = img.shape[:2]
# randomly shift the image, then pad it with zeros so that
# it can be exactly splitted into tiles of tile_size
h_pad = (h + sz - 1) // sz * sz
w_pad = (w + sz - 1) // sz * sz
sx, sy = np.random.randint(32, size=2)
img_shift = np.zeros([h_pad, w_pad, 3])
img_shift[:h, :w] = np.roll(np.roll(img, sx, 1), sy, 0)
# split into tiles
tiles = []
for x in range(0, h_pad, sz):
for y in range(0, w_pad, sz):
tile = img_shift[x:x+sz, y:y+sz]
tiles.append(tile)
# calculate gradients for each tile
tile_grads = []
bs = 1 # how many tiles to deal with at one time
for i in range(0, len(tiles), bs):
batch = np.stack(tiles[i: i+bs])
batch_size = batch.shape[0]
feed = np.zeros([64, 224, 224, 3])
feed[:batch_size] = batch
g = sess.run(grad_tensor, {input_tensor:feed})
tile_grads += [g[j] for j in range(batch_size)]
# get the gradient for the whole image
grad = np.zeros_like(img_shift)
i = 0
for x in range(0, h_pad, sz):
for y in range(0, w_pad, sz):
grad[x:x+sz, y:y+sz] = tile_grads[i]
i += 1
return np.roll(np.roll(grad, -sx, 1), -sy, 0)[:h, :w]
# image preprocessing, including centralizing & resizing
means = np.array([[[123.68, 116.779, 103.939]]])
image_scale = image_scale_mul * 224 / max(image.shape[0], image.shape[1])
image = resize(image, np.float32(image.shape[:2]) * image_scale)
image = image - means
# define loss & gradient
loss = tf.reduce_mean(tf.square(target))
grad = tf.gradients(loss, input_tensor)[0]
print("Building octaves...")
octaves = [image]
for _ in range(n_octave-1):
hw = octaves[-1].shape[:2]
octaves.append(resize(octaves[-1], np.int32(np.float32(hw) / octave_scale)))
print("Dreaming...")
octaves.reverse()
detail = np.zeros_like(octaves[0])
for i, octave in enumerate(octaves):
print("octave", i)
if i > 0:
detail = resize(detail, octave.shape[:2])
octave_d = octave + detail
for i in range(n_iter):
print(i, end=' ')
g = calc_grad_tiled(octave_d, grad)
octave_d += g * (rate / (np.abs(g).mean() + 1e-7))
detail = octave_d - octave
showarray(octave_d + means)
print('done.')
return image + detail + means
```
## Running
Create session and graph
```
graph, input_tensor, sess = restore_graph_saved_model('deepdream/resnet_v2_fp32_savedmodel_NHWC/1538687283')
```
Load image
```
img = PIL.Image.open('pic.jpg')
img = np.float32(img)
```
Select the target layer & start dreaming
**WARNING: The following output may be DISGUSTING, please do not scroll down unless you are sure.**
```
target = graph.get_tensor_by_name("resnet_model/{}:0".format('block_layer3'))
out = deepdream(graph, input_tensor, sess, target, img,
n_iter=30, rate=2.0, n_octave=5+0, octave_scale=1.4, image_scale_mul=1)
showarray(out, show=False)
sess.close()
```
## Notebook exploring the geodata extracted from adam4adam profiles based in MA
### by Mary Ruth Ngo under supervision of Professor Octavio Gonzalez
1. Using geopy, I can take the town data I scraped and convert it to lat/long values
```
!pip install geopy
from geopy.geocoders import Nominatim
geolocator = Nominatim()
location = geolocator.geocode("Chicago Illinois")
print(location.raw)
help(location)
import pandas as pd
import plotly
```
The CSV file `ma_plus_race.csv` stores the data (last updated to include a section on race/ethnicity for each profile)
```
ma = pd.DataFrame.from_csv("ma_plus_race.csv")
one_city_list = list(set([i + " Massachusetts" for i in ma["town"].values.tolist()\
if len(str(i).split(", ")) == 1 and str(i) != "nan"]))
lats = []
lons = []
# print(len(one_city_list))
for i in one_city_list:
location = geolocator.geocode(i, timeout=10)
lats.append(location.latitude)
lons.append(location.longitude)
two_city_list = list(set([i + " Massachusetts" for i in ma["town"].values.tolist()\
if len(str(i).split(", ")) == 2 and str(i) != "nan"]))
# drop 'Other ...' entries, building a new list rather than removing while iterating
two_city_list = [i for i in two_city_list if "Other" not in i]
two_city_list = [i.split(", ")[1] for i in two_city_list]
all_cities = one_city_list + two_city_list
len(all_cities)
```
**Edge cases:**
```
cleaned_cities = []
for v in all_cities:
    if "Leather" in v:
        cleaned_cities.append("Leather District, Boston")
    elif "Bay Village" in v:
        cleaned_cities.append("Bay Village")
    elif "Brickell" in v:
        continue  # drop Brickell, which is not in Massachusetts
    elif "Boston Airport" in v:
        cleaned_cities.append("Boston Logan International Airport, Boston")
    else:
        cleaned_cities.append(v)
all_cities = cleaned_cities
# note: new_areas is populated in a later cell (the cells were run out of order)
new_areas = [i + " Massachusetts" for i in new_areas]
everything = all_cities + new_areas
```
Store conversion data in dictionary and pandas frame
```
geo_dict = {}
for i in everything:
location = geolocator.geocode(i, timeout=10)
try:
lat = location.latitude
lon = location.longitude
except AttributeError:
print(i)
geo_dict[i] = {"lat": lat, "lon": lon}
geo_dict["Boston Logan Airport"] = {"lat": 42.366828, "lon": -71.027330}
geo_dict["Leather District"] = {"lat": 42.350807, "lon": -71.057969}
geo_frame = pd.DataFrame(geo_dict).T
geo_frame.index
```
Use the conversion dictionary to add latitude and longitude values to each profile in the pandas DataFrame
```
clean_towns = []
lats_list = []
lons_list = []
ma_city_values = [str(i) for i in ma["town"].values.tolist()]
for ind,i in enumerate(ma_city_values):
if str(i) == "nan" or ":" in i:
clean_towns.append("nan")
lats_list.append("nan")
lons_list.append("nan")
else:
if "," in i:
val = i.split(", ")[1]
else:
val = i
try:
lats_list.append(geo_dict[val + " Massachusetts"]["lat"])
lons_list.append(geo_dict[val + " Massachusetts"]["lon"])
clean_towns.append(val + " Massachusetts")
except KeyError:
if "Bay Village" in val:
lats_list.append(42.349176)
lons_list.append(-71.069591)
clean_towns.append("Bay Village")
elif "Leather" in val:
lats_list.append(42.350807)
lons_list.append(-71.057969)
clean_towns.append("Leather District")
elif "Airport" in val:
lats_list.append(42.366828)
lons_list.append(-71.027330)
clean_towns.append("Boston Logan Airport")
else:
clean_towns.append("nan")
lats_list.append("nan")
lons_list.append("nan")
ma["towns"] = clean_towns
ma["lat"] = lats_list
ma["lon"] = lons_list
len(ma["towns"])
from itertools import groupby
lengths = [len(list(group)) for key, group in groupby(clean_towns)]
```
**Check out the frequency breakdown for users around the state**
```
import collections
counter=collections.Counter(clean_towns)
# print(counter)
# print(counter.values())
# print(counter.keys())
frequencies = (counter.most_common(134))
geo_dict[u'Westborough Massachusetts']['lat']
freq = []
for i,v in enumerate(frequencies):
try:
freq.append([geo_dict[v[0]]["lat"], geo_dict[v[0]]["lon"], v[1]])
except KeyError:
        print(v)
prep_towns = ma["towns"][ma["hiv_status"] == "HIV Negative, on PrEP"].values.tolist()
prep_areas = ma["area"][ma["hiv_status"] == "HIV Negative, on PrEP"].values.tolist()
geolocater = []
together = [ma["towns"] + ", " + ma["area"]][0]
for i in together[0:10]:
    print(i)
geo = []
new_areas = []
for ind,i in enumerate(together):
try:
splt = i.split(", ")
if splt[0] == "nan":
if "Other" not in splt[1]:
geo.append(splt[1])
if splt[1] not in new_areas:
new_areas.append(splt[1])
else:
geo.append("nan")
else:
geo.append(splt[0])
except:
        print(ind, i)
geo.append("nan")
print(new_areas)
add_mass = [ma["area"] + " Massachusetts"]
together = [ma["towns"] + ", " + ma["area"]][0]
important_geos = []
for i in together:
try:
s = i.split(", ")
if "nan" in s[0]:
if "Other" in s[1]:
important_geos.append("nan")
else:
important_geos.append(s[1])
else:
important_geos.append(s[0])
except AttributeError:
important_geos.append("nan")
important_geos
for i in important_geos:
location = geolocator.geocode(i+" Massachusetts", timeout=10)
try:
lat = location.latitude
lon = location.longitude
except AttributeError:
print(i)
geo_dict[i+ " Massachusetts"] = {"lat": lat, "lon": lon}
# keep only conversion entries keyed by a Massachusetts place name;
# iterate over a copy of the keys so entries can be deleted safely
for key in list(geo_dict.keys()):
    if "Massachusetts" not in key:
        del geo_dict[key]
updated = []
for i in important_geos:
if "Massachusetts" not in i and "nan" not in i:
if "Logan" in i or "Leather" in i or "Bay" in i:
updated.append(i)
else:
updated.append(i + " Massachusetts")
else:
updated.append(i)
ma["geos"] = updated
geo_dict
```
### Need to fix some lat/long values in the dictionary by hand
```
geo_dict['South End Massachusetts'] = {'lat': 42.3396042, 'lon':-71.093306}
# use pop() with a default so keys that were already removed above don't raise
geo_dict.pop('Beacon Hill Massachusetts, Boston Metro Massachusetts', None)
geo_dict.pop('Boston Logan International Airport, Boston', None)
geo_dict.pop('Beacon Hill Massachusetts Massachusetts', None)
geo_dict.pop('Barnstable Massachusetts Massachusetts', None)
geo_dict.pop('Cambridge Massachusetts Massachusetts', None)
geo_dict.pop('Cambridge Massachusetts, Boston Metro Massachusetts', None)
geo_dict.pop('Dorchester Massachusetts Massachusetts', None)
geo_dict.pop('Dorchester Massachusetts, Boston Metro Massachusetts', None)
geo_dict.pop('Framingham Massachusetts Massachusetts', None)
geo_dict.pop('Framingham Massachusetts, MetroWest Massachusetts', None)
geo_dict.pop('Leather District, Boston', None)
geo_dict.pop('Lynn Massachusetts Massachusetts', None)
geo_dict.pop('Lynn Massachusetts, Boston Metro Massachusetts', None)
geo_dict.pop('Malden Massachusetts Massachusetts', None)
geo_dict.pop('Malden Massachusetts, Boston Metro Massachusetts', None)
geo_dict['Boston Metro Massachusetts'] = {'lat':42.3522313,'lon': -71.0688475}
prep_geos = ma["geos"][ma["hiv_status"] == "HIV Negative, on PrEP"]
und_geos = ma["geos"][ma["hiv_status"] == "HIV Undetectable"]
pos_geos = ma["geos"][ma["hiv_status"] == "HIV Positive"]
neg_geos = ma["geos"][ma["hiv_status"] == "HIV Negative"]
def print_geos(geos_list):
counter=collections.Counter(geos_list)
frequencies = (counter.most_common(300))
freq = []
for i,v in enumerate(frequencies):
try:
freq.append([geo_dict[v[0]]["lat"], geo_dict[v[0]]["lon"], v[1], frequencies[i][0]])
except KeyError:
            print(v)
return freq
print(len(print_geos(prep_geos)))
print_geos(prep_geos)
freq
```
Use plotly's geoscatter plot to try to visualize that frequency
```
import plotly.tools as tls
tls.set_credentials_file(username='mrngos', api_key='r2pmvn0qhu')
```
## Biggest issue:
Plotly's built-in map data doesn't scale down to city/town scope; for the US it only goes to the state/country level, so an alternative is needed (one possible direction is sketched after the code below).
```
import plotly.plotly as py
import pandas as pd
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_february_us_airport_traffic.csv')
df.head()
# ma['text'] = df['town'] + '' + df['city'] + ', ' + df['state'] + '' + 'Arrivals: ' + df['cnt'].astype(str)
scl = [ [0,"rgb(5, 10, 172)"],[0.35,"rgb(40, 60, 190)"],[0.5,"rgb(70, 100, 245)"],\
[0.6,"rgb(90, 120, 245)"],[0.7,"rgb(106, 137, 247)"],[1,"rgb(220, 220, 220)"] ]
data = [ dict(
type = 'scattergeo',
locationmode = 'MA-cities',
lon = ma['lon'],
lat = ma['lat'],
mode = 'markers',
marker = dict(
size = 8,
opacity = 0.8,
reversescale = True,
autocolorscale = False,
symbol = 'circle',
line = dict(
width=1,
color='rgba(102, 102, 102)'
),
colorscale = scl,
cmin = 0,
color = df['cnt'],
cmax = df['cnt'].max(),
colorbar=dict(
title="Incoming flightsFebruary 2011"
)
))]
layout = dict(
title = 'Most trafficked US airports<br>(Hover for airport names)',
colorbar = True,
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showland = True,
landcolor = "rgb(250, 250, 250)",
subunitcolor = "rgb(217, 217, 217)",
countrycolor = "rgb(217, 217, 217)",
countrywidth = 0.5,
subunitwidth = 0.5
),
)
fig = dict( data=data, layout=layout )
py.iplot( fig, validate=False, filename='d3-airports' )
```
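One possible direction for that alternative, sketched below against the same legacy `plotly.plotly` interface used above but not tested on this dataset, is to keep a `scattergeo` trace and zoom the geo axes to an approximate Massachusetts bounding box; the latitude/longitude ranges are rough assumptions, and the `'nan'` placeholder rows are filtered out first.
```
import plotly.plotly as py

# Sketch: zoom a scattergeo plot to an approximate Massachusetts bounding box.
# The lat/lon ranges below are rough assumptions, not values derived from the data.
ma_geo = ma[(ma['lat'] != 'nan') & (ma['lon'] != 'nan')]
ma_trace = [dict(
    type='scattergeo',
    lon=ma_geo['lon'],
    lat=ma_geo['lat'],
    mode='markers',
    marker=dict(size=6, opacity=0.7),
)]
ma_layout = dict(
    title='adam4adam profiles across Massachusetts',
    geo=dict(
        scope='usa',
        projection=dict(type='albers usa'),
        showland=True,
        landcolor="rgb(250, 250, 250)",
        lonaxis=dict(range=[-73.6, -69.8]),  # approximate MA longitude span
        lataxis=dict(range=[41.2, 43.1]),    # approximate MA latitude span
    ),
)
py.iplot(dict(data=ma_trace, layout=ma_layout), validate=False, filename='ma-profiles')
```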
```
import re, sys, math, json, os, urllib.request
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import Image
from IPython.display import display
from time import gmtime, strftime
#Load data and Glove embeddings
try:
model_data = pd.read_json('data.json',
lines=True,
orient='columns')
glove_file = open('glove.6B.100d.txt', encoding = "utf8")
print('Glove loaded.')
print('Success: Data loaded into dataframe.')
except Exception as e:
print('Data load error: ',e)
raw_data = model_data
raw_data
import pandas as pd
import numpy as np
import re
import nltk
from nltk.corpus import stopwords
import keras
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM
from keras.layers import GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import seaborn as sns
import re
from sklearn.model_selection import train_test_split
from string import punctuation
from collections import Counter
import numpy as np
def pad_dict_list(dict_list, padel):
#padding words in the list
lmax = 0
length = len(dict_list)
for lname in dict_list:
lmax = max(lmax, len(lname))
for i in range(0, length):
ll = len(dict_list[i])
if ll < lmax:
dict_list[i].append(padel)
return dict_list
def preprocess_text(sen):
# Removing html tags
sentence = remove_tags(sen)
# Removing punctuation
sentence = re.sub('[^a-zA-Z]', ' ', sentence)
# Removing single characters
sentence = re.sub(r"\s+[a-zA-Z]\s+", ' ', sentence)
# Removing multiple spaces
sentence = re.sub(r'\s+', ' ', sentence)
return sentence
TAG = re.compile(r'<[^>]+>')
def remove_tags(text):
#Removing tags - the original function
return TAG.sub('', text)
#Getting the title and removing metadata
titles = [re.split("^(.+?),", title)[-1] for title in raw_data.content]
#Preprocessing every title
X = []
for sen in titles:
X.append(preprocess_text(sen))
#Converting the letters in the titles to lower
titles = [title.lower() for title in X]
#Stripping punctuation from the titles
titles = [''.join([c for c in title if c not in punctuation]) for title in titles]
#Removing the extra titles that do not have any labels prescribed
labels = [md['labels'] for md in raw_data.annotation]
titles2 = []
labels2 = []
for title, label in zip(titles, labels):
    if label:  # keep only the examples that actually have labels
        titles2.append(title)
        labels2.append(label)
titles = titles2
labels = labels2
#One-hot encoding labels
distressed_labels = []
not_distressed_labels = []
na_labels = []
for label in labels:
if "Distress" in label:
distressed_labels.append(1)
else:
distressed_labels.append(0)
if "Not Distress" in label:
not_distressed_labels.append(1)
else:
not_distressed_labels.append(0)
if "N/A" in label:
na_labels.append(1)
else:
na_labels.append(0)
#Converting encoded labels to a numpy array
encoded_labels = np.array(np.column_stack((distressed_labels, not_distressed_labels, na_labels)))
#Converting labels and titles to train and test sets
X_train, X_test, y_train, y_test = train_test_split(titles, encoded_labels, test_size=0.20, random_state=42)
#Tokenizing both the training and testing sets
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
#Padding both the training and testing sets
max_len = max([len(i) for i in X_train])
vocab_size = len(tokenizer.word_index) + 1
X_train = pad_sequences(X_train, padding='post', maxlen = max_len)
X_test = pad_sequences(X_test, padding='post', maxlen = max_len)
from numpy import array
from numpy import asarray
from numpy import zeros
#Preparing the GloVe embeddings
embeddings_dictionary = dict()
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = asarray(records[1:], dtype='float32')
embeddings_dictionary[word] = vector_dimensions
glove_file.close()
#Embedding every tokenized word
embedding_matrix = zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
embedding_vector = embeddings_dictionary.get(word)
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM
from keras.layers import GlobalMaxPooling1D
from keras.models import Model
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.layers import Input
from keras.layers.merge import Concatenate
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
#Preparing the model
deep_inputs = Input(shape=(max_len,))
embedding_layer = Embedding(vocab_size, 100, weights = [embedding_matrix], trainable= False)(deep_inputs)
LSTM_Layer_1 = LSTM(128)(embedding_layer)
dense_layer_1 = Dense(3, activation='sigmoid')(LSTM_Layer_1)
model = Model(inputs=deep_inputs, outputs = dense_layer_1)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
print(model.summary())
#Training the model
history = model.fit(X_train, y_train, batch_size=128, epochs=5, verbose=1, validation_split=0.2)
#Evaluating the model
score = model.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
#Saving the model
model.save('models819.h5')
import pickle
#Pickling the model to check serialization
with open('tokenizer819.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
from numpy import loadtxt
from keras.models import load_model
#Loading model
loaded_model = load_model('models819.h5')  # the model saved above
loaded_model.summary()
score = loaded_model.evaluate(X_test, y_test, verbose=1)
print("Test Score:", score[0])
print("Test Accuracy:", score[1])
#Checking the predictions
with open('tokenizer819.pickle', 'rb') as handle:  # the tokenizer saved above
loaded_tokenizer = pickle.load(handle)
txt="House of Representatives Investigate Arcelor Mittal Bad Labor Practices Allegation"
seq= loaded_tokenizer.texts_to_sequences([txt])
padded = pad_sequences(seq, maxlen=max_len)
pred = loaded_model.predict(padded)
pred
import matplotlib.pyplot as plt
#Analyzing model accuracy and loss
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc='upper left')
plt.show()
#A short implementation of the zero rule algorithm to use as a benchmark
import numpy as np
from sklearn.metrics import label_ranking_average_precision_score
def zero_rule_algorithm(train_df, eval_df):
outputs = [tuple(t) for t in train_df]
most_common = max(outputs, key = outputs.count)
return [most_common for i in range(len(eval_df))]
predictions = zero_rule_algorithm(y_train, y_test)
values = [tuple(i) for i in y_test]
label_ranking_average_precision_score(values, predictions)
```
# Machine Learning Nanodegree
## Unsupervised Learning
## Project 3: Creating Customer Segments
Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to complete the project. You will not need to modify the provided code unless explicitly asked to do so. Sections whose titles begin with **'Exercise'** indicate that the following code block requires functionality you must implement. Each section comes with detailed instructions, and the parts to implement are marked with **'TODO'** in the code comments. Please read all the hints carefully!
Besides implementing code, you **must** also answer some questions about the project and your implementation. Each question you need to answer is titled **'Question X'**. Read each question carefully and give a complete answer in the **'Answer'** text box that follows it. Your submission is graded on both your answers and the functionality of your code.
>**Tip:** Code and Markdown cells can be run with the **Shift + Enter** shortcut, and Markdown cells can be edited by double-clicking them.
## Getting Started
In this project, you will analyze the underlying structure of a dataset that records the annual purchasing amounts (reported in **monetary units**) of many customers across different product categories. One task of this project is to describe, as well as possible, the variation between the different kinds of customers a wholesale distributor serves. Doing so would allow the distributor to organize its delivery service to better meet the needs of each customer.
The dataset for this project can be found in the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the 'Channel' and 'Region' features are excluded from the analysis, and the focus is on the six recorded product categories purchased by customers.
Run the code cell below to load the wholesale customers dataset together with the Python libraries needed for this project. If the dataset loads successfully, its size will be printed below.
```
# Check your Python version
from sys import version_info
if version_info.major != 3:
    raise Exception('Please use Python 3.x to complete this project')
# Import the libraries needed for this project
import numpy as np
import pandas as pd
import visuals as vs
from IPython.display import display # allows the use of display() for DataFrames
# Show matplotlib plots inline (renders more nicely in the notebook)
%matplotlib inline
# High-resolution display
# %config InlineBackend.figure_format='retina'
# Load the wholesale customers dataset
try:
    data = pd.read_csv("customers.csv")
    data.drop(['Region', 'Channel'], axis = 1, inplace = True)
    print("Wholesale customers dataset has {} samples with {} features each.".format(*data.shape))
except:
    print("Dataset could not be loaded. Is the dataset missing?")
```
## Data Exploration
In this section, you will begin exploring the data through visualizations and code to understand how each feature relates to the others. You will see a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset that you will track throughout the rest of the project.
Run the code cell below to produce a statistical description of the dataset. Note that the dataset contains six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what kinds of products each category represents.
```
# Display a description of the dataset
display(data.describe())
data.min()
data[data.Milk==data.Milk.max()]
```
### Exercise: Selecting Samples
To get a better understanding of the customers and of how their data will transform through the analysis, it is best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list, representing the customers you will track. It is suggested to keep trying different indices until you find three customers that differ noticeably from one another.
```
# TODO: Select three indices of your choice to sample from the dataset
indices = [86,338,154]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print("Chosen samples of wholesale customers dataset:")
display(samples)
```
### Question 1
What kind of establishment (customer) do you think each of the three samples you chose represents? For each chosen sample customer, justify your answer by comparing its spending in each product category against the statistical description of the dataset.
**Hint:** Types of establishments include supermarkets, cafés, retailers, and others. Avoid naming specific businesses; for example, when describing a restaurant customer, do not say McDonald's.
**Answer:**
The first sample point likely represents a café chain, because its purchases of milk products and detergents/paper are very large, far above the mean and close to the maximum, which matches what a café consumes.
The second sample point likely represents a large supermarket, because its purchases of frozen goods and groceries are far above the mean while fresh produce is low and close to the minimum, a product mix that suits supermarket stocking.
The third sample point likely represents a fresh-market retailer, because its purchases in every category are below the mean, with fresh produce relatively high among them.
### Exercise: Feature Relevance
One interesting thought to consider is whether one (or more) of the six product categories is actually relevant for understanding customer purchasing behavior. That is, when a customer buys some amount of one category of product, can we be sure they will necessarily buy a proportional amount of another category? A simple way to test this relevance is to train a supervised (regression) model on a copy of the dataset with one feature removed, use that model to predict the removed feature, and then score the prediction to see how well it does.
In the code block below, you will need to implement the following:
- Use the `DataFrame.drop` function to remove the feature of your choice from the dataset, and assign the result to `new_data`.
- Use `sklearn.model_selection.train_test_split` to split the dataset into training and testing sets.
  - Use the removed feature as the target label. Set `test_size` to `0.25` and set a `random_state`.
- Import a `DecisionTreeRegressor`, set a `random_state`, and train it on the training set.
- Report the model's prediction score on the testing set using the regressor's `score` function.
```
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the chosen feature
new_data = data.drop(columns='Grocery')
# TODO: Split the data into training and testing sets, using the dropped feature as the target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(new_data, data.Grocery, test_size=0.25, random_state=42)
# TODO: Create a DecisionTreeRegressor and train it on the training set
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
# TODO: Report the prediction score on the testing set
regressor.fit(X_train, y_train)
score = regressor.score(X_test, y_test)
score
```
### Question 2
Which feature did you attempt to predict? What was the reported prediction score? Is this feature necessary for identifying customers' spending habits? Why or why not?
**Hint:** The coefficient of determination, $R^2$, lies between 0 and 1, with 1 being a perfect fit; a negative $R^2$ means the model fails to fit the data.
**Answer:** I attempted to predict `Grocery`; the prediction score was around 0.7, a reasonably good fit. This feature is not particularly necessary for identifying spending habits, because most of its variation can be recovered from the other features.
### Visualize Feature Distributions
To get a better understanding of the dataset, we can construct a scatter matrix of each pair of product features. If the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix may show no relationship between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix may show a correlation between that feature and the others. Run the code below to produce a scatter matrix.
```
# Produce a scatter matrix for each pair of features in the data
pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
### Question 3
Are there any pairs of features that show some degree of correlation? If so, which ones? Does this result confirm or deny your suspicion about the relevance of the feature you attempted to predict? How is the data for those features distributed?
**Hint:** Is the data normally distributed? Where do most of the data points lie?
**Answer:** Milk, Grocery, and Detergents_Paper show clear positive correlations with one another.
This partially confirms the earlier attempt, since the positive correlations mean the Grocery amounts can be inferred from Milk and Detergents_Paper.
Most data points sit near a peak close to 0, and the overall distributions look right-skewed.
## Data Preprocessing
In this section, you will preprocess the data, by scaling it appropriately and by detecting outliers (which you may optionally remove), into a form that better represents the customers. Preprocessing the data is an important step for ensuring that the results of the analysis are significant and meaningful.
### Exercise: Feature Scaling
If the data is not normally distributed, and especially if the mean and median differ significantly (indicating a strong skew), it is usually appropriate to apply a [non-linear scaling](https://github.com/czcbangkai/translations/blob/master/use_of_logarithms_in_economics/use_of_logarithms_in_economics.pdf) ([original English article](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics)), particularly for financial data. One way to achieve this scaling is the [Box-Cox transform](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which computes the power transformation that best reduces the skew of the data. A simpler approach that works well in most cases is to apply the natural logarithm.
In the code block below, you will need to implement the following:
- Apply a logarithmic scaling to `data` with the `np.log` function and assign a copy of the result (leaving the original `data` unchanged) to `log_data`.
- Apply a logarithmic scaling to the sample data `samples` with the `np.log` function and assign a copy of the result to `log_samples`.
```
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of the newly-transformed features
pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
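As an aside, the Box-Cox transform mentioned above is available in SciPy. The cell below is an illustration only (it is not part of the project workflow and assumes the `data` DataFrame loaded earlier): it fits Box-Cox to a single feature and compares the resulting skew with that of the plain log transform.
```
# Illustration only: Box-Cox scaling of one feature via SciPy.
import numpy as np
from scipy import stats

fresh = data['Fresh'].values            # strictly positive, as Box-Cox requires
fresh_bc, lmbda = stats.boxcox(fresh)   # fitted power parameter lambda
print("Fitted Box-Cox lambda for 'Fresh': {:.3f}".format(lmbda))
print("Skew before: {:.2f}, after Box-Cox: {:.2f}, after log: {:.2f}".format(
    stats.skew(fresh), stats.skew(fresh_bc), stats.skew(np.log(fresh))))
```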
### Observation
After applying a natural logarithm scaling, the features of the data appear much more normally distributed. For any pairs of features you previously found to be correlated, check whether that correlation is still present (and whether it is now stronger or weaker than before).
Run the code below to see how the sample data has changed after the natural logarithm transformation.
```
# Display the log-transformed sample data
log_samples
```
### Exercise: Outlier Detection
Detecting outliers is a very important step in the data preprocessing of any analysis, because the presence of outliers can skew results that take those values into account. There are many rules of thumb for what counts as an outlier in a dataset. Here we will use [Tukey's method for identifying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): an *outlier step* is defined as 1.5 times the interquartile range (IQR), and a data point is flagged as abnormal if, for some feature, its value lies more than one outlier step outside that feature's IQR.
In the code block below, you will need to implement the following:
- Assign the 25th percentile of the given feature to `Q1`, using `np.percentile`.
- Assign the 75th percentile of the given feature to `Q3`, again using `np.percentile`.
- Assign the calculated outlier step for the given feature to `step`.
- Optionally remove outliers by adding their indices to the `outliers` list.
**Note:** If you choose to remove any outliers, make sure the sample points you selected are not among them!
Once you have completed this implementation, the dataset will be stored in `good_data`.
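As a quick sanity check on the 1.5 x IQR rule before applying it to the log-data, here is a toy illustration on a small hand-made array (independent of the project data):
```
# Toy illustration of Tukey's 1.5*IQR rule on a small array.
import numpy as np

x = np.array([1, 2, 2, 3, 3, 3, 4, 4, 5, 40])   # 40 is an obvious outlier
Q1, Q3 = np.percentile(x, 25), np.percentile(x, 75)
step = 1.5 * (Q3 - Q1)
mask = (x < Q1 - step) | (x > Q3 + step)
print("Q1 = {}, Q3 = {}, step = {}".format(Q1, Q3, step))
print("Outliers:", x[mask])                      # -> [40]
```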
```
# For each feature, find the data points with extremely high or low values
outliers = {'Fresh': -1, 'Milk': -1, 'Grocery':-1, 'Frozen':-1, 'Detergents_Paper':-1, 'Delicatessen':-1}
for feature in log_data.keys():
    # TODO: Calculate Q1 (25th percentile of the data) for the given feature
    Q1 = np.percentile(log_data[feature],25)
    # TODO: Calculate Q3 (75th percentile of the data) for the given feature
    Q3 = np.percentile(log_data[feature],75)
    # TODO: Use the interquartile range to calculate an outlier step (1.5 times the IQR)
    step = 1.5*(Q3-Q1)
    # Display the outliers
    print("Data points considered outliers for the feature '{}':".format(feature))
    outliers_feature = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
    outliers[feature] = outliers_feature.index
    display(outliers_feature)
# TODO (optional): Select the indices of the data points you wish to remove
outliers = []
# The code below removes the data points indexed in outliers and stores the result in good_data
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
```
### Question 4
List all the data points that are flagged as outliers for more than one feature. Should these points be removed from the dataset? Why or why not? Add the indices of any data points you decide to remove to the `outliers` variable.
```
def find_outliers(dataframe, feature):
import numpy as np
Q1 = np.percentile(dataframe[feature],25)
Q3 = np.percentile(dataframe[feature],75)
step = 1.5*(Q3-Q1)
outliers = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
#display(outliers)
return(outliers.index)
outliers = find_outliers(log_data, 'Fresh').join(find_outliers(log_data, 'Grocery'), how='inner')
for feature1 in log_data.keys():
for feature2 in log_data.keys().drop(feature1):
outliers_temp = find_outliers(log_data, feature1).join(find_outliers(log_data, feature2), how='inner')
outliers = outliers.join(outliers_temp, how='outer')
#display(outliers)
log_data.iloc[outliers]
```
**Answer:** 65, 66, 75, 128, 154
They should not be removed: being "abnormal" on a pair of features may be exactly what characterizes a particular type of wholesale customer.
I therefore choose not to remove any points.
```
outliers=[]
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
```
## Feature Transformation
In this section you will use principal component analysis (PCA) to analyze the underlying structure of the wholesale customer data. Since PCA computes the dimensions that maximize variance in a dataset, we will find which combinations of features best describe the customers.
### Exercise: PCA
Now that the data has been scaled to a more normal distribution and the outliers we wished to remove have been removed, we can apply PCA to `good_data` to discover which dimensions of the data maximize the variance of the features. In addition to finding these dimensions, PCA also reports the explained variance ratio of each dimension, that is, how much of the variance in the data is explained by that dimension alone. Note that a PCA component (dimension) can be thought of as a new "feature" of the space, but one that is composed of the original features in the data.
In the code block below, you will need to implement the following:
- Import `sklearn.decomposition.PCA` and assign the result of fitting a 6-dimensional PCA on `good_data` to `pca`.
- Transform `log_samples` with `pca.transform` and assign the result to `pca_samples`.
```
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting the good data with six dimensions
pca = PCA(n_components=6).fit(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate the PCA results plot
pca_results = vs.pca_results(good_data, pca)
```
### Question 5
How much of the variance is explained **in total** by the first and second principal components? What about the first four? Using the visualization above, discuss, from the perspective of customer spending, what kind of purchasing behavior each of the first four principal components represents, and justify your reasoning.
**Hint:**
* Analyze the sign and magnitude of each feature's weight within each principal component.
* Discuss the spending behavior implied by the positive and negative weights of each component.
* A positive increase along a particular dimension corresponds to an increase in the positively-weighted features and a decrease in the negatively-weighted features, at rates proportional to each feature's weight. [Reference: Interpretation of the Principal Components](https://onlinecourses.science.psu.edu/stat505/node/54)
**Answer:** The first and second principal components together explain 0.719 of the variance; the first four explain 0.9314.
In the first component, Fresh and Frozen carry positive weights and Detergents_Paper carries the largest negative weight; customers high on this dimension buy little detergents/paper, milk, or grocery and a lot of fresh and frozen goods.
In the second component, every weight is negative, indicating low spending across all categories; the negative weights on detergents/paper and grocery are smaller, so spending there stays closer to average, which might describe an ordinary, non-chain restaurant.
In the third component, Delicatessen and Frozen carry large positive weights and Fresh a large negative weight; such customers strongly favor delicatessen goods and strongly avoid fresh produce.
In the fourth component, Frozen carries a large positive weight and Delicatessen a large negative weight; such customers favor frozen goods and avoid delicatessen goods.
### Observation
Run the code below to see how the log-transformed sample data changes after a 6-dimensional PCA transformation. Observe the numerical values of the first four dimensions of the sample points, and consider whether they are consistent with your initial interpretation of those samples.
```
# Display the sample log-data after applying the PCA transformation
display(pd.DataFrame(np.round(pca_samples,6), columns = pca_results.index.values))
```
### Exercise: Dimensionality Reduction
When using principal component analysis, one of the main goals is to reduce the dimensionality of the data, which in turn reduces the complexity of the problem. Dimensionality reduction comes at a cost: fewer dimensions can express less of the total variance in the data. Because of this, the *cumulative explained variance ratio* is very important for deciding how many dimensions the problem needs. In addition, if most of the variance can be expressed by only two or three dimensions, the reduced data can be visualized.
In the code block below, you will need to implement the following:
- Fit a 2-dimensional PCA on `good_data` and assign the result to `pca`.
- Transform `good_data` with `pca.transform` and assign the result to `reduced_data`.
- Transform `log_samples` with `pca.transform` and assign the result to `pca_samples`.
```
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2).fit(good_data)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
```
### Observation
Run the code below to see how the log-transformed sample data changes when a PCA transformation with only two dimensions is applied. Observe that, compared with the 6-dimensional PCA transformation, the values of the first two dimensions stay the same.
```
# Display the sample log-data after applying the two-dimensional PCA transformation
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
```
## Visualizing a Biplot
A biplot is a scatterplot where each data point is placed according to its scores on the principal components; the axes are the principal components (here, `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features onto the components. A biplot helps us interpret the reduced data and discover relationships between the principal components and the original features.
Run the code below to create a biplot of the reduced data.
```
# Visualize the biplot
vs.biplot(good_data, reduced_data, pca)
```
### Observation
Once we have the projections of the original features (the red arrows), it becomes much easier to interpret the relative position of each data point in the scatterplot.
In this biplot, which of the original features are strongly associated with the first principal component? Which with the second? Do your observations agree with the `pca_results` plot obtained earlier?
## Clustering
In this section, you will choose either the K-Means clustering algorithm or the Gaussian mixture model clustering algorithm to discover the customer segments hidden in the data. You will then recover some specific key data points from the clusters and understand their meaning by transforming them back to their original dimensions and scale.
### Question 6
What are the advantages of using the K-Means clustering algorithm? What are the advantages of using a Gaussian mixture model? Given what you have observed about the customer data so far, which of the two algorithms will you use, and why?
**Answer:** K-Means is computationally cheap and its clusters are easy to read off a plot.
A Gaussian mixture model (when initialized well) is less affected by outliers and its centers usually carry practical meaning; it is also a soft clustering method, so a single data point can belong to every cluster in some proportion.
I choose the Gaussian mixture model, because the plot appears to contain two Gaussian-like centers that may represent two typical kinds of buyers, and since the distinction between the two is not clear-cut, soft clustering is the better fit.
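To make the soft-clustering point concrete, the short sketch below (assuming `reduced_data`, the 2-dimensional PCA projection defined above) shows that `GaussianMixture.predict_proba` returns per-cluster membership probabilities rather than a single hard label.
```
# Illustration: soft cluster assignments from a Gaussian mixture model.
from sklearn.mixture import GaussianMixture

gmm = GaussianMixture(n_components=2, random_state=0).fit(reduced_data)
probs = gmm.predict_proba(reduced_data)   # shape (n_samples, 2); each row sums to 1
print(probs[:5].round(3))                 # each customer's membership in both clusters
```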
### Exercise: Creating Clusters
Depending on the problem, the number of clusters you expect in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that any given number of clusters is optimal for the data, since we do not know what structure (if any) exists in it. However, we can measure the quality of a clustering by computing the [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) of each point. The silhouette coefficient of a data point measures how similar it is to its assigned cluster, on a scale from -1 (dissimilar) to 1 (similar). The **mean** silhouette coefficient gives us a simple way to score the quality of a clustering.
In the code block below, you will need to implement the following:
- Apply a clustering algorithm to `reduced_data` and assign it to `clusterer`, setting a `random_state` so the results can be reproduced.
- Predict the cluster of every point in `reduced_data` with `clusterer.predict` and assign the result to `preds`.
- Find the cluster centers using the appropriate attribute of the algorithm and assign them to `centers`.
- Predict the cluster of every sample point in `pca_samples` and assign the result to `sample_preds`.
- Import `sklearn.metrics.silhouette_score` and compute the silhouette score of `reduced_data` against `preds`.
- Assign the silhouette score to `score` and print the result.
```
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
# TODO: Apply your chosen clustering algorithm to the reduced data
clusterer = GaussianMixture(n_components=2, random_state=0)
clusterer.fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.means_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the chosen clustering
score = silhouette_score(reduced_data,preds)
```
### Question 7
Report the silhouette score for each number of clusters you tried. Which number of clusters gives the best silhouette score?
```
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
for i in [2,3,4,5,6]:
clusterer = GaussianMixture(n_components=i, random_state=0)
clusterer.fit(reduced_data)
preds = clusterer.predict(reduced_data)
score = silhouette_score(reduced_data,preds)
print(i, score)
```
**Answer:**
Among these, two clusters give the best silhouette score, about 0.41.
### Cluster Visualization
Once you have chosen the optimal number of clusters for your algorithm using the scoring metric above, you can visualize the result by running the code block below. As an experiment, feel free to adjust the number of clusters and look at the different visualizations, but the final visualization you provide must match the optimal number of clusters you chose.
```
# Display the clustering results from the provided implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)
```
### Exercise: Data Recovery
Each cluster in the visualization above has a central point. These centers (or means) are not actual data points in the data; rather, they are the averages of all the data points predicted to belong to the respective cluster. For the problem of creating customer segments, a cluster's center corresponds to the average customer of that segment. Since the data has been reduced in dimension and scaled by a logarithm, we can recover the spending of the customer represented by such a point by applying the inverse transformations.
In the code block below, you will need to implement the following:
- Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the result to `log_centers`.
- Apply `np.exp`, the inverse of `np.log`, to `log_centers` and assign the result to `true_centers`.
```
# TODO: Inverse-transform the cluster centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
log_centers
```
### Question 8
Considering the total spending of the representative points above in each product category, what type of customer do you think each segment represents, and why? Refer to the statistical description obtained at the beginning of the project to justify your answer.
**Hint:** A customer assigned to `'Cluster X'` is best identified with the type of establishment represented by the feature set of `'Segment X'`.
```
display(data.describe())
```
**Answer:** Segment 0 likely represents restaurants, because its Grocery and Detergents_Paper spending is above average.
Segment 1 likely represents fresh-market retailers, because its Delicatessen and Fresh spending is above average.
### Question 9
For each sample point, which segment from **Question 8** best represents it? Are the predictions for the sample points consistent with your earlier guesses?
Run the code cell below to find which cluster each sample point is predicted to belong to.
```
# Display the predictions
for i, pred in enumerate(sample_preds):
print("Sample point", i, "predicted to be in Cluster", pred)
```
**Answer:** Sample 1 is assigned to the restaurant segment; my initial guess was a café.
Sample 2 is assigned to the retailer segment; my initial guess was a supermarket.
Sample 3 is assigned to the retailer segment; my initial guess was a retailer.
The assignments broadly agree with my earlier guesses, although with only two clusters each segment is much broader.
## Conclusion
In this final section, you will look at ways to use the clustered data. First, you will consider how the different groups of customers, the **customer segments**, might be affected differently by a change in delivery scheme. Second, you will consider how labeling every customer with its segment provides one more feature for the customer data. Finally, you will compare the customer segments to a hidden variable in the data and see whether the clustering identified certain relationships.
### Question 10
When making small changes to their products or services, companies often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) to determine whether the change affects their customers positively or negatively. The wholesale distributor is considering changing its delivery service from 5 days a week to 3 days a week, but will only do so for the customers that react positively to it. How can the distributor use the customer segments to find out which customers, if any, react positively to the change in delivery service? Describe concretely how the A/B test would be carried out in this scenario, and what evidence the final conclusion would rest on.
**Hint:** Can we assume the change affects all customers equally? How can we determine which type of customer it affects most?
**Answer:** First split the customers into n segments, then within each segment randomly assign customers to an A group and a B group. Give the A group the 5-days-a-week delivery service and the B group the 3-days-a-week service, and collect feedback from both. If, within a given segment, the B group's feedback is significantly better than the A group's, then for that segment the change in delivery policy gets a positive reaction.
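A minimal sketch of that stratified assignment is given below; it assumes the `preds` cluster labels computed above and a hypothetical `feedback` NumPy array holding one response score per customer collected after the trial.
```
# Sketch: within-segment (stratified) A/B assignment and comparison.
# `feedback` is a hypothetical per-customer response score gathered after the trial.
import numpy as np

rng = np.random.RandomState(0)
groups = rng.choice(['A', 'B'], size=len(preds))        # random split of all customers

for cluster in np.unique(preds):
    in_cluster = (preds == cluster)
    a_scores = feedback[in_cluster & (groups == 'A')]   # kept on 5-day delivery
    b_scores = feedback[in_cluster & (groups == 'B')]   # switched to 3-day delivery
    print("Segment {}: mean feedback A = {:.2f}, B = {:.2f}".format(
        cluster, a_scores.mean(), b_scores.mean()))
```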
### Question 11
Clustering lets us analyze additional structure in an otherwise unlabeled dataset. Since every customer has a best segment (depending on the clustering algorithm used), we can treat the customer segment as an [engineered feature](https://en.wikipedia.org/wiki/Feature_learning#Unsupervised_feature_learning) of the data. Suppose the wholesale distributor recently acquired ten new customers and has estimated each one's annual spending in every product category. Given these estimates, how could the distributor use both its estimates and the results of the unsupervised learning to make better predictions for the ten new customers?
**Hint:** In the code cell below we provide a dataset that has already been clustered (the cluster assignment is stored in its `cluster` attribute), and we run a small experiment on it. Try running the code and see how the final score for predicting 'Region' changes when the clustering feature 'cluster' is present versus absent. What does this suggest to you?
```
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# Load the data that already contains the clustering results
cluster_data = pd.read_csv("cluster.csv")
y = cluster_data['Region']
X = cluster_data.drop(['Region'], axis = 1)
# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24)
clf = RandomForestClassifier(random_state=24)
clf.fit(X_train, y_train)
score_with_cluster = clf.score(X_test, y_test)
# Remove the cluster feature
X_train = X_train.copy()
X_train.drop(['cluster'], axis=1, inplace=True)
X_test = X_test.copy()
X_test.drop(['cluster'], axis=1, inplace=True)
clf.fit(X_train, y_train)
score_no_cluster = clf.score(X_test, y_test)
print("不使用cluster特征的得分: %.4f"%score_no_cluster)
print("使用cluster特征的得分: %.4f"%score_with_cluster)
```
**Answer:** With the cluster feature included, the classification score improves! One could even try using the predictions as a feature for clustering and then the cluster assignments as a feature for prediction, iterating until performance stops improving.
### Visualizing Underlying Distributions
At the beginning of this project, we discussed removing the `'Channel'` and `'Region'` features from the dataset so that the analysis would focus on the customer product categories. By reintroducing the `'Channel'` feature and applying the same PCA transformation used on the original dataset, we find that the dataset reveals an interesting structure.
Run the code cell below to see which data points in the reduced space are labeled `'HoReCa'` (Hotel/Restaurant/Café) and which are labeled `'Retail'`. You will also find the sample points circled in the plot to show their labels.
```
# Display the clustering results according to the 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
```
### Question 12
How well do your chosen clustering algorithm and number of clusters compare to the underlying distribution of Hotel/Restaurant/Café customers versus Retailer customers? According to this distribution, is there any cluster that maps cleanly onto 'Retailers' or 'Hotels/Restaurants/Cafés'? Do you consider these classifications consistent with your earlier definition of the customer segments?
**Answer:** The clustering uses only two clusters (although with a different random state it can split into three), and they correspond to the underlying Hotel/Restaurant/Café versus Retailer distribution: the green cluster matches Hotel/Restaurant/Café and the red cluster matches Retailer. This is broadly consistent with my own segmentation, although I had further subdivided those two broad groups into smaller ones such as cafés, fast-food chains, and large supermarkets, and those finer categories do not seem separable by clustering.
> **Note**: Once you have completed all of the code and answered all of the questions, you can export this iPython Notebook as an HTML file from the menu bar via **File -> Download as -> HTML (.html)**. Submit the HTML file together with this iPython notebook as your project submission.
```
%matplotlib notebook
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# use a 3-component PCA
reduced_data3 = PCA(n_components=3).fit_transform(good_data)
reduced_data3 = pd.DataFrame(reduced_data3,
columns = ['Dimension 1', 'Dimension 2', 'Dimension 3'])
# get the labels
channel = pd.read_csv("customers.csv")['Channel']
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data3, channel], axis = 1)
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
# group by labels
grouped = labeled.groupby('Channel')
# color stuff
cmap = cm.get_cmap('gist_rainbow')
# let's plot!
fig = plt.figure()
axe3d = fig.gca(projection = '3d')
for i, data3 in grouped:
axe3d.scatter(data3.iloc[:,0],data3.iloc[:,1],data3.iloc[:,2],
c = cmap((i-1)*1.0/2),label = labels[int(i)-1], linewidth = 0)
# adjust axis labels and limits
axe3d.set_xlabel('Dimension 1')
axe3d.set_ylabel('Dimension 2')
axe3d.set_zlabel('Dimension 3')
axe3d.set_xlim(labeled.iloc[:,0].min(),labeled.iloc[:,0].max())
axe3d.set_ylim(labeled.iloc[:,1].min(),labeled.iloc[:,1].max())
axe3d.set_zlim(labeled.iloc[:,2].min(),labeled.iloc[:,2].max())
axe3d.legend()
plt.show()
```
score = silhouette_score(reduced_data,preds)
print(i, score)
# 从已有的实现中展示聚类的结果
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# TODO:反向转换中心点
log_centers = pca.inverse_transform(centers)
# TODO:对中心点做指数转换
true_centers = np.exp(log_centers)
# 显示真实的中心点
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
log_centers
display(data.describe())
# 显示预测结果
for i, pred in enumerate(sample_preds):
print("Sample point", i, "predicted to be in Cluster", pred)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
# 读取包含聚类结果的数据
cluster_data = pd.read_csv("cluster.csv")
y = cluster_data['Region']
X = cluster_data.drop(['Region'], axis = 1)
# 划分训练集测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24)
clf = RandomForestClassifier(random_state=24)
clf.fit(X_train, y_train)
score_with_cluster = clf.score(X_test, y_test)
# 移除cluster特征
X_train = X_train.copy()
X_train.drop(['cluster'], axis=1, inplace=True)
X_test = X_test.copy()
X_test.drop(['cluster'], axis=1, inplace=True)
clf.fit(X_train, y_train)
score_no_cluster = clf.score(X_test, y_test)
print("不使用cluster特征的得分: %.4f"%score_no_cluster)
print("使用cluster特征的得分: %.4f"%score_with_cluster)
# Display the clustering results according to the 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
% matplotlib notebook
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# use a 3-component PCA
reduced_data3 = PCA(n_components=3).fit_transform(good_data)
reduced_data3 = pd.DataFrame(reduced_data3,
columns = ['Dimension 1', 'Dimension 2', 'Dimension 3'])
# get the labels
channel = pd.read_csv("customers.csv")['Channel']
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data3, channel], axis = 1)
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
# group by labels
grouped = labeled.groupby('Channel')
# color stuff
cmap = cm.get_cmap('gist_rainbow')
# let's plot!
fig = plt.figure()
axe3d = fig.add_subplot(projection = '3d')
for i, data3 in grouped:
axe3d.scatter(data3.iloc[:,0],data3.iloc[:,1],data3.iloc[:,2],
c = cmap((i-1)*1.0/2),label = labels[int(i)-1], linewidth = 0)
# adjust axis labels and limits
axe3d.set_xlabel('Dimension 1')
axe3d.set_ylabel('Dimension 2')
axe3d.set_zlabel('Dimension 3')
axe3d.set_xlim(labeled.iloc[:,0].min(),labeled.iloc[:,0].max())
axe3d.set_ylim(labeled.iloc[:,1].min(),labeled.iloc[:,1].max())
axe3d.set_zlim(labeled.iloc[:,2].min(),labeled.iloc[:,2].max())
axe3d.legend()
plt.show()
| 0.186391 | 0.898722 |
Copyright 2020 Verily Life Sciences LLC
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file or at
https://developers.google.com/open-source/licenses/bsd
# Trial Specification Demo
The first step to use the Metis tool is to specify your trial.
All data in the Metis tool is stored in [xarray.DataArray](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.html) datasets. This is a [convenient datastructure](http://xarray.pydata.org/en/stable/why-xarray.html) for storing multidimensional arrays with different labels, coordinates or attributes. You don't need to have any expertise with xr.Datasets to use the Metis toolkit. The goal of this notebook is to walk you through the construction of the dataset that contains the specification of your trial.
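If you have not used xarray before, here is a tiny standalone example (illustrative only, not part of the trial specification) of the kind of labeled array the tool works with:
```
import numpy as np
import pandas as pd
import xarray as xr

# A 2-D array of weekly recruitment capacity, labeled by site and date.
capacity = xr.DataArray(
    np.full((2, 3), 50.0),
    dims=('location', 'time'),
    coords={'location': ['site_a', 'site_b'],
            'time': pd.date_range('2020-12-15', periods=3)},
    name='site_capacity')
print(capacity.sel(location='site_a'))
```
Selecting by label (`.sel`) rather than by integer position is the main convenience the toolkit relies on.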
This notebook has several sections:
1. **Define the Trial**. In this section you will load all aspects of your trial, including the trial sites, the expected recruitment demographics for each trial site (e.g. from a census) as well as the rules for how the trial will be carried out.
2. **Load Incidence Forecasts**. In this section you will load forecasts for covid incidence at the locations of your trial. We highly recommend using forecasts that are as local as possible for the sites of the trial. There is significant variation in covid incidence among counties in the same state, and taking the state (province) average can be highly misleading. Here we include code to preload forecasts for county level forecasts from the US Center for Disease Control. The trial planner should include whatever forecasts they find most compelling.
3. **Simulate the Trial**. Given the incidence forecasts and the trial rules, the third section will simulate the trial.
4. **Optimize the Trial**. Given the parameters of the trial within our control, the next section asks whether we can set those parameters to make the trial meet our objective criteria, for example most likely to succeed or to succeed as quickly as possible. We have written a set of optimization routines for optimizing different types of trials.
We write out different trial plans, which you can then examine interactively in the second notebook in the Metis Toolbox. That notebook lets you visualize how the trial is proceeding at a per site level and experiment with what will happen when you turn up or down different sites.
If you have questions about how to implement these steps for your clinical trial, or there are variations in the trial specification that are not captured with this framework, please contact metis@projectbaseline.com for additional help.
## Imports
```
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
import functools
import importlib.resources
import numpy as np
import os
import pandas as pd
pd.plotting.register_matplotlib_converters()
import xarray as xr
from IPython.display import display
# metis imports
from metis import demo_data
from metis import io as metis_io
from metis import util
from metis import optimization
from metis import sim
from metis import sim_scenarios
from metis import public_data
```
## Helper methods for visualization
```
def plot_participants(participants):
time = participants.time.values
util.sum_all_but_dims(['time'], participants).cumsum('time').plot()
plt.title('Participants recruited (both control and treatment arm)')
plt.xlim(time[0], time[-1])
plt.ylim(bottom=0)
plt.show()
def plot_events(events):
time = events.time.values
events.cumsum('time').plot.line(x='time', color='k', alpha=.02, add_legend=False)
for analysis, num_events in c.needed_control_arm_events.to_series().items():
plt.axhline(num_events, linestyle='--')
plt.text(time[0], num_events, analysis, ha='left', va='bottom')
plt.ylim(0, 120)
plt.xlim(time[0], time[-1])
plt.title(f'Control arm events\n{events.scenario.size} simulated scenarios')
plt.show()
def plot_success(c, events):
time = c.time.values
success_day = xr.DataArray(util.success_day(c.needed_control_arm_events, events),
coords=(events.scenario, c.analysis))
fig, axes = plt.subplots(c.analysis.size, 1, sharex=True)
step = max(1, int(np.timedelta64(3, 'D') / (time[1] - time[0])))
bins = mpl.units.registry[np.datetime64].convert(time[::step], None, None)
for analysis, ax in zip(c.analysis.values, axes):
success_days = success_day.sel(analysis=analysis).values
        success_days = np.where(np.isnat(success_days), np.datetime64('2050-06-01'), success_days)  # replace NaT (never successful) with a far-future date
ax.hist(success_days, bins=bins, density=True)
ax.yaxis.set_visible(False)
# subtract time[0] to make into timedelta64s so that we can take a mean/median
median = np.median(success_days - time[0]) + time[0]
median = pd.to_datetime(median).date()
ax.axvline(median, color='r')
ax.text(time[0], 0, f'{analysis}\n{median} median', ha='left', va='bottom')
plt.xlabel('Date when sufficient statistical power is achieved')
plt.xlim(time[0], time[-1])
plt.xticks(rotation=35)
plt.show()
```
# 1. Define the trial
## Choose the sites
A trial specification consists of a list of sites, together with various properties of the sites.
For this demo, we read demonstration data embedded in the Metis Python package. Specifically, this information is loaded from the file `demo_data/site_list1.csv`. Each row of this file contains the name of a site, as well as detailed information about that site. In this illustrative example, we pick sites in real US counties. Each column contains the following information:
* `opencovid_key`. This is a key that specifies a location within [COVID-19 Open Data](https://github.com/GoogleCloudPlatform/covid-19-open-data). It is required by this schema because it is how we join the incidence forecasts to the site locations.
* `capacity`, the number of participants the site can recruit each week, including both control arm and treatment arms. For simplicity, we assume this is constant over time, but variable recruitment rates are also supported. (See the construction of the `site_capacity` array below).
* `start_date`. This is the first date on which the site can recruit participants.
* The proportion of the population in various demographic categories. For this example, we consider categories for age (`over_60`), ethnicity (`black`, `hisp_lat`), and comorbidities (`smokers`, `diabetes`, `obese`). **Here we just fill in demographic information with random numbers.** We assume different categories are independent, but the data structure supports complex beliefs about how different categories intersect, how much each site can enrich for different categories, and different infection risks for different categories. These are represented in the factors `population_fraction`, `participant_fraction`, `incidence_scaler`, and `incidence_to_event_factor` below. In a practical situation, we recommend that the trial planner uses accurate estimates of the populations for the different sites they are drawing from.
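As a rough back-of-the-envelope illustration of how these factors combine (hypothetical numbers, and not the exact calculation the toolkit performs; it also ignores any population-level renormalization), consider a single site with two age buckets:
```
# Hypothetical single-site example of turning a population incidence forecast
# into an expected control-arm event rate. Illustrative only.
import numpy as np

incidence = 5e-4                             # overall daily infection probability at the site
participant_fraction = np.array([0.7, 0.3])  # share of participants: under_60, over_60
incidence_scaler = np.array([1.0, 2.0])      # relative infection risk per bucket
incidence_to_event_factor = 0.6              # fraction of infections that become clinical events

per_bucket_rate = incidence * incidence_scaler * incidence_to_event_factor
expected_event_rate = float(np.sum(participant_fraction * per_bucket_rate))
print(expected_event_rate)                   # expected events per participant per day
```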
```
with importlib.resources.path(demo_data, 'site_list1.csv') as p:
demo_data_file_path = os.fspath(p)
site_df = pd.read_csv(demo_data_file_path, index_col=0)
site_df.index.name = 'location'
site_df['start_date'] = pd.to_datetime(site_df['start_date'])
display(site_df)
# Add in information we have about each county.
site_df = pd.concat([site_df, public_data.us_county_data().loc[site_df.opencovid_key].set_index(site_df.index)], axis=1)
```
## Choose trial parameters
The trial requires a number of parameters that have to be specified to be able to simulate what will happen in the trial: These include:
* `trial_size_cap`: the maximum number of participants in the trial (includes both control and treatment arms)
* `start_day` and `end_day`: the boundaries of the time period we will simulate.
* `proportion_control_arm`: what proportion of participants are in the control arm. It's assumed that the control arm is uniformly distributed across locations and time (e.g. at each location on each day, half of the recruited participants are assigned to the control arm).
* `needed_control_arm_events`: the number of events required in the *control* arm of the trial at various intermediate analysis points. For this example we assume intermediate analyses which would demonstrate a vaccine efficacy of about 55%, 65%, 75%, 85%, or 95%.
* `observation_delay`: how long after a participant is recruited before they contribute an event. This is measured in the same time units as your incidence forecasts. Here we assume 28 days.
* `site_capacity` and `site_activation`: the number of participants each site could recruit *if* it were activated, and whether each site is activated at any given time. Here we assume each site has a constant weekly capacity, but time dependence can be included (e.g. to model ramp up of recruitment).
* `population_fraction`, `participant_fraction`, and `incidence_scaler`: the proportion of the general population and the proportion of participants who fall into different demographic categories at each location, and the infection risk factor for each category. These three are required to translate an overall incidence forecast for the population into the incidence forecast for your control arm.
* `incidence_to_event_factor`: what proportion of infections lead to a clinical event. We assume a constant 0.6, but you can specify different values for different demographic categories.
These factors are specified in the datastructure below.
```
start_day = np.datetime64('2020-12-15')
end_day = np.datetime64('2021-04-01')
time_resolution = np.timedelta64(1, 'D')
time = np.arange(start_day, end_day + time_resolution, time_resolution)
c = xr.Dataset(coords=dict(time=time))
c['proportion_control_arm'] = 0.5
# Assume some intermediate analyses.
frac_control = float(c.proportion_control_arm)
efficacy = np.array([.55, .65, .75, .85, .95])
ctrl_events = util.needed_control_arm_events(efficacy, frac_control)
vaccine_events = (1 - efficacy) * ctrl_events * (1 - frac_control) / frac_control
ctrl_events, vaccine_events = np.round(ctrl_events), np.round(vaccine_events)
efficacy = 1 - (vaccine_events / ctrl_events)
total_events = ctrl_events + vaccine_events
analysis_names = [
f'{int(t)} total events @{int(100 * e)}% VE' for t, e in zip(total_events, efficacy)
]
c['needed_control_arm_events'] = xr.DataArray(
ctrl_events, dims=('analysis',)).assign_coords(analysis=analysis_names)
c['recruitment_type'] = 'default'
c['observation_delay'] = int(np.timedelta64(28, 'D') / time_resolution) # 28 days
c['trial_size_cap'] = 30000
# convert weekly capacity to capacity per time step
site_capacity = site_df.capacity.to_xarray() * time_resolution / np.timedelta64(7, 'D')
site_capacity = site_capacity.broadcast_like(c.time).astype('float')
# Can't recruit before the activation date
activation_date = site_df.start_date.to_xarray()
for l in activation_date.location.values:
date = activation_date.loc[l]
site_capacity.loc[site_capacity.time < date, l] = 0.0
c['site_capacity'] = site_capacity.transpose('location', 'time')
c['site_activation'] = xr.ones_like(c.site_capacity)
# For the sake of simplicity, this code assumes black and hisp_lat are
# non-overlapping, and that obese/smokers/diabetes are non-overlapping.
frac_and_scalar = util.fraction_and_incidence_scaler
fraction_scalers = [
frac_and_scalar(site_df, 'age', ['over_60'], [1], 'under_60'),
frac_and_scalar(site_df, 'ethnicity', ['black', 'hisp_lat'], [1, 1],
'other'),
frac_and_scalar(site_df, 'comorbidity', ['smokers', 'diabetes', 'obese'],
[1, 1, 1], 'none')
]
fractions, incidence_scalers = zip(*fraction_scalers)
# We assume that different categories are independent (e.g. the proportion of
# smokers over 60 is the same as the proportion of smokers under 60)
c['population_fraction'] = functools.reduce(lambda x, y: x * y, fractions)
# We assume the participants are drawn uniformly from the population.
c['participant_fraction'] = c['population_fraction']
# Assume some boosted incidence risk for subpopulations. We pick random numbers
# here, but in actual use you'd put your best estimate for the incidence risk
# of each demographic category.
# Since we assume participants are uniformly drawn from the county population,
# this actually doesn't end up affecting the estimated number of clinical events.
c['incidence_scaler'] = functools.reduce(lambda x, y: x * y,
incidence_scalers)
c.incidence_scaler.loc[dict(age='over_60')] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(comorbidity=['smokers', 'diabetes', 'obese'])] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(ethnicity=['black', 'hisp_lat'])] = 1 + 2 * np.random.random()
# We assume a constant incidence_to_event_factor.
c['incidence_to_event_factor'] = 0.6 * xr.ones_like(c.incidence_scaler)
util.add_empty_history(c)
```
# 2. Load incidence forecasts
We load historical incidence data from [COVID-19 Open Data](https://github.com/GoogleCloudPlatform/covid-19-open-data) and forecasts from [COVID-19 Forecast Hub](https://github.com/reichlab/covid19-forecast-hub).
We note that there are a set of caveats when using the CDC models that should be considered when using these for trial planning:
* Forecasts are only available for US counties. Hence, these forecasts will only work for US-only trials. Trials with sites outside the US will need to supplement these forecasts.
* Forecasts only go out for four weeks. Trials take much longer than four weeks to complete, when measured from site selection to logging the required number of cases in the control arm. For simplicity, here we extrapolate incidence as *constant* after the last point of the forecast. Here we extrapolate out to March 1, 2021.
* The forecasts from the CDC are provided with quantile estimates. Our method depends on getting *representative forecasts* from the model: we need a set of sample forecasts for each site which represent the set of scenarios that can occur. Ideally these scenarios will be equally probable so that we can compute probabilities by averaging over samples. To get samples from quantiles, we interpolate/extrapolate to get 100 evenly spaced quantile estimates, which we treat as representative samples.
You can of course replace these forecasts with whatever represents your beliefs and uncertainty about what will happen.
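To make the quantile-to-sample step concrete, here is a minimal numpy sketch of one way to do it (an assumption for illustration; the toolkit's exact interpolation may differ):
```
# Interpolate a handful of reported forecast quantiles to 100 evenly spaced
# quantile levels and treat the results as equally probable samples.
import numpy as np

reported_q = np.array([0.025, 0.25, 0.5, 0.75, 0.975])    # quantile levels in the forecast
reported_vals = np.array([10., 25., 40., 60., 110.])      # forecast incidence at those levels
target_q = (np.arange(100) + 0.5) / 100                   # 100 evenly spaced levels
samples = np.interp(target_q, reported_q, reported_vals)  # clamps at the ends beyond the data
print(samples[:3], samples[-3:])
```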
```
# Extrapolate out a bit extra to ensure we're within bounds when we interpolate later.
full_pred = public_data.fetch_cdc_forecasts([('COVIDhub-ensemble', '2021-01-18'),
('COVIDhub-baseline', '2021-01-18')],
end_date=c.time.values[-1] + np.timedelta64(15, 'D'),
num_samples=50)
full_gt = public_data.fetch_opencovid_incidence()
# Work around temporary opencovid bug which puts some data in the future
full_gt = full_gt.sel(time=slice(None, np.datetime64('today')))
# Include more historical incidence here for context. It will be trimmed off when
# we construct scenarios to simulate. The funny backwards range is to ensure that if
# we use weekly instead of daily resolution, we use the same day of the week as c.
time = np.arange(c.time.values[-1], np.datetime64('2020-10-01'), -time_resolution)[::-1]
incidence_model = public_data.assemble_forecast(full_gt, full_pred, site_df, time)
locs = np.random.choice(c.location.values, size=5, replace=False)
incidence_model.sel(location=locs).plot.line(x='time', color='k', alpha=.1, add_legend=False, col='location', row='model')
plt.ylim(0.0, 1e-3)
plt.suptitle('Forecast incidence at a sampling of sites', y=1.0)
pass
```
# 3. Simulate the trial
Now that we've specified how the trial works, we can compute how the trial will turn out given the incidence forecasts you've specified. We do this by first sampling what incidence will be at all locations simultaneously. For any given fully-specified scenario, we compute how many participants will be under observation at any given time in any given location (in any given combination of demographic buckets), then based on the specified local incidence we compute how many will become infected, and how many will produce clinical events.
Here we assume that the incidence trajectories of different locations are drawn at random from the available forecasts. Other scenario-generation methods in `sim_scenarios` support more complex approaches. For example, we may be highly uncertain about the incidence at each site, but believe that if incidence is high at a site, then it will also be high at geographically nearby sites. If this is the case then the simulation should not choose forecasts independently at each site but instead should take these correlations into account. The scenario-generating methods in `sim_scenarios` allow us to do that.
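To illustrate the difference between independent and correlated sampling, here is a standalone numpy sketch (made-up array shapes and grouping, not the `sim_scenarios` API): draw a forecast sample index independently per site, or draw one index per group of nearby sites so their incidence moves together.
```
# Independent vs. group-correlated scenario sampling over made-up forecast samples.
import numpy as np

rng = np.random.default_rng(0)
num_samples, num_sites, num_times = 100, 6, 30
forecast_samples = rng.gamma(2.0, 1e-4, size=(num_samples, num_sites, num_times))
site_group = np.array([0, 0, 0, 1, 1, 1])    # e.g. sites grouped by region

def sample_independent():
    idx = rng.integers(num_samples, size=num_sites)
    return forecast_samples[idx, np.arange(num_sites), :]

def sample_correlated():
    group_idx = rng.integers(num_samples, size=site_group.max() + 1)
    idx = group_idx[site_group]              # same sample index for sites in a region
    return forecast_samples[idx, np.arange(num_sites), :]

scenario = sample_correlated()               # shape (num_sites, num_times)
```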
```
# incidence_flattened: rolls together all the models you've included in your ensemble, treating them as independent samples.
incidence_flattened = sim_scenarios.get_incidence_flattened(incidence_model, c)
# incidence_scenarios: chooses scenarios given the incidence curves and your chosen method of scenario-generation.
incidence_scenarios = sim_scenarios.generate_scenarios_independently(incidence_flattened, num_scenarios=100)
# compute the number of participants recruited under your trial rule
participants = sim.recruitment(c)
# compute the number of control arm events under your trial rules and incidence_scenarios.
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
# plot events and label different vaccine efficacies
plot_events(events)
# plot histograms of time to success
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_all_site_on.nc')
```
# 4. Optimize the trial
The simulations above supposed that all sites are activated as soon as possible (i.e. `site_activation` is identically 1). Now that we have shown the ability to simulate the outcome of the trial, we can turn it into a mathematical optimization problem.
**Given the parameters of the trial within our control, how can we set those parameters to make the trial most likely to succeed or to succeed as quickly as possible?**
We imagine the main levers of control are which sites to activate or which sites to prioritize activating, and this is what is implemented here.
However, the framework we have developed is very general and could be extended to just about anything you control whose impact you can predict. For example,
* If you can estimate the impact of money spent boosting recruitment of high-risk participants, we could use those estimates to help figure out how to best allocate a fixed budget.
* If you had requirements for the number of people infected in different demographic groups, we could use those to help figure out how to best allocate doses between sites with different population characteristics.
The optimization algorithms are implemented in [JAX](https://github.com/google/jax), a python library that makes it possible to differentiate through native python and numpy functions. The flexibility of the language makes it possible to compose a variety of trial optimization scenarios and then to write algorithms that find optima. There are a number of technical details in how the optimization algorithms are written that will be discussed elsewhere.
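As a flavor of what differentiating through the simulation means, here is a toy JAX example (illustrative only; the objective and numbers are made up and this is not the Metis loss): take the gradient of expected control-arm events with respect to a continuous relaxation of the site activations.
```
import jax
import jax.numpy as jnp

site_event_rates = jnp.array([0.8, 1.2, 0.5, 2.0])  # expected events per site if activated

def expected_events(activation_logits):
    activation = jax.nn.sigmoid(activation_logits)   # soft on/off in (0, 1)
    return jnp.sum(activation * site_event_rates)

grads = jax.grad(expected_events)(jnp.zeros(4))
print(grads)                                         # larger gradient -> more valuable site
```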
### An example: Optimizing Static site activations
Suppose that the only variable we can control is which sites should be activated, and we have to make this decision at the beginning of the trial. This decision is then set in stone for the duration of the trial. To calculate this we proceed as follows:
The optimizer takes in the trial plan, encoded in the xarray `c`, as well as the `incidence_scenarios`, and then searches for the sites that should be activated to minimize the time to success of the trial. The algorithm modifies `c` *in place*, so that after the algorithm runs, it returns the trial plan `c` but with the site activations chosen to be on or off in accordance with the optimization.
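For intuition only, here is a toy sketch of the decision that static activation poses (a greedy heuristic on made-up numbers, not the algorithm implemented in `optimization`): given simulated per-site control-arm events across scenarios, pick the subset of sites with the largest expected contribution under a site budget.
```
# Toy static site selection; the data and the greedy rule are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(0)
num_sites, num_scenarios, budget = 10, 100, 4
site_events = rng.poisson(lam=rng.uniform(1, 10, size=num_sites),
                          size=(num_scenarios, num_sites))
expected_per_site = site_events.mean(axis=0)
activated = np.argsort(expected_per_site)[::-1][:budget]
print(sorted(activated.tolist()))
```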
```
%time optimization.optimize_static_activation(c, incidence_scenarios)
```
### Plot the resulting sites
Now we can plot the activations for the resulting sites. Only a fraction (92/146) of the original sites are activated in the optimized plan. If you compare the distributions for the time to success for the optimized sites to those in the original trial plan, where all sites were activated equally, you will see that the optimized plan saves time for the trial.
```
all_sites = c.location.values
activated_sites = c.location.values[c.site_activation.mean('time') == 1]
# Simulate the results with this activation scheme.
print(f'\n\n{len(activated_sites)} of {len(all_sites)} activated')
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_optimized_static.nc')
```
### Another example: prioritizing sites
Suppose we can activate up to 20 sites each week for 10 weeks. How do we prioritize them?
```
# We put all sites in one group. We also support prioritizing sites within groupings.
# For example, if you can activate 2 sites per state per week, sites would be grouped
# according to the state they're in.
site_to_group = pd.Series(['all_sites'] * len(site_df), index=site_df.index)
decision_dates = c.time.values[:70:7]
allowed_activations = pd.DataFrame([[20] * len(decision_dates)], index=['all_sites'], columns=decision_dates)
parameterizer = optimization.PivotTableActivation(c, site_to_group, allowed_activations, can_deactivate=False)
optimization.optimize_params(c, incidence_scenarios, parameterizer)
c['site_activation'] = c.site_activation.round() # each site has to be on or off at each time
df = c.site_activation.to_pandas()
df.columns = [pd.to_datetime(x).date() for x in df.columns]
sns.heatmap(df, cbar=False)
plt.title('Which sites are activated when')
plt.show()
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_prioritized.nc')
```
|
github_jupyter
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
import functools
import importlib.resources
import numpy as np
import os
import pandas as pd
pd.plotting.register_matplotlib_converters()
import xarray as xr
from IPython.display import display
# metis imports
from metis import demo_data
from metis import io as metis_io
from metis import util
from metis import optimization
from metis import sim
from metis import sim_scenarios
from metis import public_data
def plot_participants(participants):
time = participants.time.values
util.sum_all_but_dims(['time'], participants).cumsum('time').plot()
plt.title('Participants recruited (both control and treatment arm)')
plt.xlim(time[0], time[-1])
plt.ylim(bottom=0)
plt.show()
def plot_events(events):
time = events.time.values
events.cumsum('time').plot.line(x='time', color='k', alpha=.02, add_legend=False)
for analysis, num_events in c.needed_control_arm_events.to_series().items():
plt.axhline(num_events, linestyle='--')
plt.text(time[0], num_events, analysis, ha='left', va='bottom')
plt.ylim(0, 120)
plt.xlim(time[0], time[-1])
plt.title(f'Control arm events\n{events.scenario.size} simulated scenarios')
plt.show()
def plot_success(c, events):
time = c.time.values
success_day = xr.DataArray(util.success_day(c.needed_control_arm_events, events),
coords=(events.scenario, c.analysis))
fig, axes = plt.subplots(c.analysis.size, 1, sharex=True)
step = max(1, int(np.timedelta64(3, 'D') / (time[1] - time[0])))
bins = mpl.units.registry[np.datetime64].convert(time[::step], None, None)
for analysis, ax in zip(c.analysis.values, axes):
success_days = success_day.sel(analysis=analysis).values
        success_days = np.where(np.isnat(success_days), np.datetime64('2050-06-01'), success_days)  # replace NaT (never successful) with a far-future date
ax.hist(success_days, bins=bins, density=True)
ax.yaxis.set_visible(False)
# subtract time[0] to make into timedelta64s so that we can take a mean/median
median = np.median(success_days - time[0]) + time[0]
median = pd.to_datetime(median).date()
ax.axvline(median, color='r')
ax.text(time[0], 0, f'{analysis}\n{median} median', ha='left', va='bottom')
plt.xlabel('Date when sufficient statistical power is achieved')
plt.xlim(time[0], time[-1])
plt.xticks(rotation=35)
plt.show()
with importlib.resources.path(demo_data, 'site_list1.csv') as p:
demo_data_file_path = os.fspath(p)
site_df = pd.read_csv(demo_data_file_path, index_col=0)
site_df.index.name = 'location'
site_df['start_date'] = pd.to_datetime(site_df['start_date'])
display(site_df)
# Add in information we have about each county.
site_df = pd.concat([site_df, public_data.us_county_data().loc[site_df.opencovid_key].set_index(site_df.index)], axis=1)
start_day = np.datetime64('2020-12-15')
end_day = np.datetime64('2021-04-01')
time_resolution = np.timedelta64(1, 'D')
time = np.arange(start_day, end_day + time_resolution, time_resolution)
c = xr.Dataset(coords=dict(time=time))
c['proportion_control_arm'] = 0.5
# Assume some intermediate analyses.
frac_control = float(c.proportion_control_arm)
efficacy = np.array([.55, .65, .75, .85, .95])
ctrl_events = util.needed_control_arm_events(efficacy, frac_control)
vaccine_events = (1 - efficacy) * ctrl_events * (1 - frac_control) / frac_control
ctrl_events, vaccine_events = np.round(ctrl_events), np.round(vaccine_events)
efficacy = 1 - (vaccine_events / ctrl_events)
total_events = ctrl_events + vaccine_events
analysis_names = [
f'{int(t)} total events @{int(100 * e)}% VE' for t, e in zip(total_events, efficacy)
]
c['needed_control_arm_events'] = xr.DataArray(
ctrl_events, dims=('analysis',)).assign_coords(analysis=analysis_names)
c['recruitment_type'] = 'default'
c['observation_delay'] = int(np.timedelta64(28, 'D') / time_resolution) # 28 days
c['trial_size_cap'] = 30000
# convert weekly capacity to capacity per time step
site_capacity = site_df.capacity.to_xarray() * time_resolution / np.timedelta64(7, 'D')
site_capacity = site_capacity.broadcast_like(c.time).astype('float')
# Can't recruit before the activation date
activation_date = site_df.start_date.to_xarray()
for l in activation_date.location.values:
date = activation_date.loc[l]
site_capacity.loc[site_capacity.time < date, l] = 0.0
c['site_capacity'] = site_capacity.transpose('location', 'time')
c['site_activation'] = xr.ones_like(c.site_capacity)
# For the sake of simplicity, this code assumes black and hisp_lat are
# non-overlapping, and that obese/smokers/diabetes are non-overlapping.
frac_and_scalar = util.fraction_and_incidence_scaler
fraction_scalers = [
frac_and_scalar(site_df, 'age', ['over_60'], [1], 'under_60'),
frac_and_scalar(site_df, 'ethnicity', ['black', 'hisp_lat'], [1, 1],
'other'),
frac_and_scalar(site_df, 'comorbidity', ['smokers', 'diabetes', 'obese'],
[1, 1, 1], 'none')
]
fractions, incidence_scalers = zip(*fraction_scalers)
# We assume that different categories are independent (e.g. the proportion of
# smokers over 60 is the same as the proportion of smokers under 60)
c['population_fraction'] = functools.reduce(lambda x, y: x * y, fractions)
# We assume the participants are drawn uniformly from the population.
c['participant_fraction'] = c['population_fraction']
# Assume some boosted incidence risk for subpopulations. We pick random numbers
# here, but in actual use you'd put your best estimate for the incidence risk
# of each demographic category.
# Since we assume participants are uniformly drawn from the county population,
# this actually doesn't end up affecting the estimated number of clinical events.
c['incidence_scaler'] = functools.reduce(lambda x, y: x * y,
incidence_scalers)
c.incidence_scaler.loc[dict(age='over_60')] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(comorbidity=['smokers', 'diabetes', 'obese'])] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(ethnicity=['black', 'hisp_lat'])] = 1 + 2 * np.random.random()
# We assume a constant incidence_to_event_factor.
c['incidence_to_event_factor'] = 0.6 * xr.ones_like(c.incidence_scaler)
util.add_empty_history(c)
# Extrapolate out a bit extra to ensure we're within bounds when we interpolate later.
full_pred = public_data.fetch_cdc_forecasts([('COVIDhub-ensemble', '2021-01-18'),
('COVIDhub-baseline', '2021-01-18')],
end_date=c.time.values[-1] + np.timedelta64(15, 'D'),
num_samples=50)
full_gt = public_data.fetch_opencovid_incidence()
# Work around temporary opencovid bug which puts some data in the future
full_gt = full_gt.sel(time=slice(None, np.datetime64('today')))
# Include more historical incidence here for context. It will be trimmed off when
# we construct scenarios to simulate. The funny backwards range is to ensure that if
# we use weekly instead of daily resolution, we use the same day of the week as c.
time = np.arange(c.time.values[-1], np.datetime64('2020-10-01'), -time_resolution)[::-1]
incidence_model = public_data.assemble_forecast(full_gt, full_pred, site_df, time)
locs = np.random.choice(c.location.values, size=5, replace=False)
incidence_model.sel(location=locs).plot.line(x='time', color='k', alpha=.1, add_legend=False, col='location', row='model')
plt.ylim(0.0, 1e-3)
plt.suptitle('Forecast incidence at a sampling of sites', y=1.0)
pass
# incidence_flattened: rolls together all the models you've included in your ensemble, treating them as independent samples.
incidence_flattened = sim_scenarios.get_incidence_flattened(incidence_model, c)
# incidence_scenarios: chooses scenarios given the incidence curves and your chosen method of scenario-generation.
incidence_scenarios = sim_scenarios.generate_scenarios_independently(incidence_flattened, num_scenarios=100)
# compute the number of participants recruited under your trial rule
participants = sim.recruitment(c)
# compute the number of control arm events under your trial rules and incidence_scenarios.
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
# plot events and label different vaccine efficacies
plot_events(events)
# plot histograms of time to success
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_all_site_on.nc')
%time optimization.optimize_static_activation(c, incidence_scenarios)
all_sites = c.location.values
activated_sites = c.location.values[c.site_activation.mean('time') == 1]
# Simulate the results with this activation scheme.
print(f'\n\n{len(activated_sites)} of {len(all_sites)} activated')
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_optimized_static.nc')
# We put all sites in one group. We also support prioritizing sites within groupings.
# For example, if you can activate 2 sites per state per week, sites would be grouped
# according to the state they're in.
site_to_group = pd.Series(['all_sites'] * len(site_df), index=site_df.index)
decision_dates = c.time.values[:70:7]
allowed_activations = pd.DataFrame([[20] * len(decision_dates)], index=['all_sites'], columns=decision_dates)
parameterizer = optimization.PivotTableActivation(c, site_to_group, allowed_activations, can_deactivate=False)
optimization.optimize_params(c, incidence_scenarios, parameterizer)
c['site_activation'] = c.site_activation.round() # each site has to be on or off at each time
df = c.site_activation.to_pandas()
df.columns = [pd.to_datetime(x).date() for x in df.columns]
sns.heatmap(df, cbar=False)
plt.title('Which sites are activated when')
plt.show()
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_output
metis_io.write_ville_to_netcdf(c, 'demo_output/site_list1_prioritized.nc')
| 0.55254 | 0.941493 |
# Imports
```
from configuration.paths import *
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import models, layers
from src.datasets.chest_xray import ChestXRay
from src.utils.schemes import Scheme
from src.utils.image import Image
```
# Dataset loading for training
```
IMAGE_SIZE = (256, 256)
cx = ChestXRay(DATASET_CHEST_XRAY_PATH, IMAGE_SIZE)
Scheme.dataset_info(cx)
x_train, y_train, _ = cx.load_train_data()
x_train = x_train / 255.0
x_val, y_val, _ = cx.load_val_data()
x_val = x_val / 255.0
Scheme.labeled_images(x_train, y_train)
```
# Model definition
```
model = models.Sequential()
model.add(layers.Conv2D(filters=16, activation='relu', kernel_size=3, padding='same', input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=16, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=32, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=64, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=128, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam',
loss="binary_crossentropy",
metrics=['binary_accuracy',
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall')])
```
# Model training
```
history = model.fit(x=x_train,
y=y_train,
validation_data=(x_val, y_val),
epochs=35)
Scheme.training_graphs(history)
del x_train
del y_train
```
# Evaluating the model
```
x_test, y_test, diagnosis = cx.load_test_data()
x_test = x_test / 255.0
test_loss, test_accuracy, _, _ = model.evaluate(x_test, y_test)
images = [x_test[10], x_test[11], x_test[55], x_test[67], x_test[60],
x_test[202], x_test[203], x_test[206], x_test[217], x_test[220]]
labels = [1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
images = Image.explainer(images, model, IMAGE_SIZE)
Scheme.labeled_images(images, labels)
predictions = model.predict(x_test)
predictions_rounded = np.round(predictions).astype(int)
Scheme.confusion_matrix(predictions_rounded, cx.get_test_df().diagnosis.to_numpy())
```
|
github_jupyter
|
from configuration.paths import *
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import models, layers
from src.datasets.chest_xray import ChestXRay
from src.utils.schemes import Scheme
from src.utils.image import Image
IMAGE_SIZE = (256, 256)
cx = ChestXRay(DATASET_CHEST_XRAY_PATH, IMAGE_SIZE)
Scheme.dataset_info(cx)
x_train, y_train, _ = cx.load_train_data()
x_train = x_train / 255.0
x_val, y_val, _ = cx.load_val_data()
x_val = x_val / 255.0
Scheme.labeled_images(x_train, y_train)
model = models.Sequential()
model.add(layers.Conv2D(filters=16, activation='relu', kernel_size=3, padding='same', input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3)))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=16, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=32, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=64, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.add(layers.Conv2D(filters=128, activation="relu", kernel_size=3, padding="same"))
model.add(layers.MaxPool2D(pool_size=2, strides=2, padding='valid'))
model.summary()
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam',
loss="binary_crossentropy",
metrics=['binary_accuracy',
tf.keras.metrics.Precision(name='precision'),
tf.keras.metrics.Recall(name='recall')])
history = model.fit(x=x_train,
y=y_train,
validation_data=(x_val, y_val),
epochs=35)
Scheme.training_graphs(history)
del x_train
del y_train
x_test, y_test, diagnosis = cx.load_test_data()
x_test = x_test / 255.0
test_loss, test_accuracy, _, _ = model.evaluate(x_test, y_test)
images = [x_test[10], x_test[11], x_test[55], x_test[67], x_test[60],
x_test[202], x_test[203], x_test[206], x_test[217], x_test[220]]
labels = [1, 1, 1, 1, 1,
0, 0, 0, 0, 0]
images = Image.explainer(images, model, IMAGE_SIZE)
Scheme.labeled_images(images, labels)
predictions = model.predict(x_test)
predictions_rounded = np.round(predictions).astype(int)
Scheme.confusion_matrix(predictions_rounded, cx.get_test_df().diagnosis.to_numpy())
| 0.935957 | 0.876476 |
```
%reload_ext autoreload
%autoreload 2
from fastai.tabular import *
```
# Rossmann
## Data preparation
To create the feature-engineered train_clean and test_clean from the Kaggle competition data, run `rossman_data_clean.ipynb`. One important step that deals with time series is this:
```python
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
```
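For context, here is a rough hand-rolled sketch of a few of the calendar features `add_datepart` derives from the `Date` column (approximate; the real fastai function adds more columns, such as quarter and year start/end flags):

```python
import pandas as pd

df = pd.DataFrame({'Date': pd.to_datetime(['2015-07-31', '2015-08-01'])})
df['Year'] = df.Date.dt.year
df['Month'] = df.Date.dt.month
df['Week'] = df.Date.dt.isocalendar().week
df['Day'] = df.Date.dt.day
df['Dayofweek'] = df.Date.dt.dayofweek
df['Is_month_end'] = df.Date.dt.is_month_end
df['Elapsed'] = (df.Date - pd.Timestamp('1970-01-01')) // pd.Timedelta('1s')
print(df)
```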
```
path = Config().data_path()/'rossmann'
train_df = pd.read_pickle(path/'train_clean')
train_df.head().T
n = len(train_df); n
```
### Experimenting with a sample
```
idx = np.random.permutation(range(n))[:2000]
idx.sort()
small_train_df = train_df.iloc[idx[:1000]]
small_test_df = train_df.iloc[idx[1000:]]
small_cont_vars = ['CompetitionDistance', 'Mean_Humidity']
small_cat_vars = ['Store', 'DayOfWeek', 'PromoInterval']
small_train_df = small_train_df[small_cat_vars + small_cont_vars + ['Sales']]
small_test_df = small_test_df[small_cat_vars + small_cont_vars + ['Sales']]
small_train_df.head()
small_test_df.head()
categorify = Categorify(small_cat_vars, small_cont_vars)
categorify(small_train_df)
categorify(small_test_df, test=True)
small_test_df.head()
small_train_df.PromoInterval.cat.categories
small_train_df['PromoInterval'].cat.codes[:5]
fill_missing = FillMissing(small_cat_vars, small_cont_vars)
fill_missing(small_train_df)
fill_missing(small_test_df, test=True)
small_train_df[small_train_df['CompetitionDistance_na'] == True]
```
### Preparing full data set
```
train_df = pd.read_pickle(path/'train_clean')
test_df = pd.read_pickle(path/'test_clean')
len(train_df),len(test_df)
procs=[FillMissing, Categorify, Normalize]
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
'SchoolHoliday_fw', 'SchoolHoliday_bw']
cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
dep_var = 'Sales'
df = train_df[cat_vars + cont_vars + [dep_var,'Date']].copy()
test_df['Date'].min(), test_df['Date'].max()
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut
valid_idx = range(cut)
df[dep_var].head()
data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs,)
.split_by_idx(valid_idx)
.label_from_df(cols=dep_var, label_cls=FloatList, log=True)
.add_test(TabularList.from_df(test_df, path=path, cat_names=cat_vars, cont_names=cont_vars))
.databunch())
doc(FloatList)
```
## Model
```
max_log_y = np.log(np.max(train_df['Sales'])*1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04,
y_range=y_range, metrics=exp_rmspe)
learn.model
len(data.train_ds.cont_names)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-3, wd=0.2)
learn.save('1')
doc(learn.recorder.plot_losses)
learn.recorder.plot_losses()
learn.load('1');
learn.fit_one_cycle(5, 3e-4)
learn.fit_one_cycle(5, 3e-4)
```
(10th place in the competition was 0.108)
```
test_preds=learn.get_preds(DatasetType.Test)
test_df["Sales"]=np.exp(test_preds[0].data).numpy().T[0]
test_df[["Id","Sales"]]=test_df[["Id","Sales"]].astype("int")
test_df[["Id","Sales"]].to_csv("rossmann_submission.csv",index=False)
```
|
github_jupyter
|
%reload_ext autoreload
%autoreload 2
from fastai.tabular import *
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
path = Config().data_path()/'rossmann'
train_df = pd.read_pickle(path/'train_clean')
train_df.head().T
n = len(train_df); n
idx = np.random.permutation(range(n))[:2000]
idx.sort()
small_train_df = train_df.iloc[idx[:1000]]
small_test_df = train_df.iloc[idx[1000:]]
small_cont_vars = ['CompetitionDistance', 'Mean_Humidity']
small_cat_vars = ['Store', 'DayOfWeek', 'PromoInterval']
small_train_df = small_train_df[small_cat_vars + small_cont_vars + ['Sales']]
small_test_df = small_test_df[small_cat_vars + small_cont_vars + ['Sales']]
small_train_df.head()
small_test_df.head()
categorify = Categorify(small_cat_vars, small_cont_vars)
categorify(small_train_df)
categorify(small_test_df, test=True)
small_test_df.head()
small_train_df.PromoInterval.cat.categories
small_train_df['PromoInterval'].cat.codes[:5]
fill_missing = FillMissing(small_cat_vars, small_cont_vars)
fill_missing(small_train_df)
fill_missing(small_test_df, test=True)
small_train_df[small_train_df['CompetitionDistance_na'] == True]
train_df = pd.read_pickle(path/'train_clean')
test_df = pd.read_pickle(path/'test_clean')
len(train_df),len(test_df)
procs=[FillMissing, Categorify, Normalize]
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
'SchoolHoliday_fw', 'SchoolHoliday_bw']
cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
dep_var = 'Sales'
df = train_df[cat_vars + cont_vars + [dep_var,'Date']].copy()
test_df['Date'].min(), test_df['Date'].max()
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut
valid_idx = range(cut)
df[dep_var].head()
data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs,)
.split_by_idx(valid_idx)
.label_from_df(cols=dep_var, label_cls=FloatList, log=True)
.add_test(TabularList.from_df(test_df, path=path, cat_names=cat_vars, cont_names=cont_vars))
.databunch())
doc(FloatList)
max_log_y = np.log(np.max(train_df['Sales'])*1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04,
y_range=y_range, metrics=exp_rmspe)
learn.model
len(data.train_ds.cont_names)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-3, wd=0.2)
learn.save('1')
doc(learn.recorder.plot_losses)
learn.recorder.plot_losses()
learn.load('1');
learn.fit_one_cycle(5, 3e-4)
learn.fit_one_cycle(5, 3e-4)
test_preds=learn.get_preds(DatasetType.Test)
test_df["Sales"]=np.exp(test_preds[0].data).numpy().T[0]
test_df[["Id","Sales"]]=test_df[["Id","Sales"]].astype("int")
test_df[["Id","Sales"]].to_csv("rossmann_submission.csv",index=False)
| 0.410993 | 0.827131 |
```
import csv
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
```
## Constants
```
LOGDIR = '../trace-data'
DATE_FORMAT_STR = '%Y-%m-%d %H:%M:%S'
MINUTES_PER_DAY = (24 * 60)
MICROSECONDS_PER_MINUTE = (60 * 1000)
```
## Utility code
```
def parse_date(date_str):
"""Parses a date string and returns a datetime object if possible.
Args:
date_str: A string representing a date.
Returns:
A datetime object if the input string could be successfully
parsed, None otherwise.
"""
if date_str is None or date_str == '' or date_str == 'None':
return None
return datetime.datetime.strptime(date_str, DATE_FORMAT_STR)
def timedelta_to_minutes(timedelta):
"""Converts a datetime timedelta object to minutes.
Args:
timedelta: The timedelta to convert.
Returns:
The number of minutes captured in the timedelta.
"""
minutes = 0.0
minutes += timedelta.days * MINUTES_PER_DAY
minutes += timedelta.seconds / 60.0
minutes += timedelta.microseconds / MICROSECONDS_PER_MINUTE
return minutes
def round_to_nearest_minute(t):
"""Rounds a datetime object down to the nearest minute.
Args:
t: A datetime object.
Returns:
A new rounded down datetime object.
"""
return t - datetime.timedelta(seconds=t.second, microseconds=t.microsecond)
def add_minute(t):
"""Adds a single minute to a datetime object.
Args:
t: A datetime object.
Returns:
A new datetime object with an additional minute.
"""
return t + datetime.timedelta(seconds=60)
def get_cdf(data):
"""Returns the CDF of the given data.
Args:
data: A list of numerical values.
Returns:
        A pair of lists (x, y) for plotting the CDF.
"""
sorted_data = sorted(data)
p = 100. * np.arange(len(sorted_data)) / (len(sorted_data) - 1)
return sorted_data, p
data = np.arange(10)
print(get_cdf(data))
class Job:
"""Encapsulates a job."""
def __init__(self, status, vc, jobid, attempts, submitted_time, user):
"""Records job parameters and computes key metrics.
Stores the passed in arguments as well as the number of GPUs
requested by the job. In addition, computes the queueing delay
as defined as the delta between the submission time and the start
time of the first attempt. Finally, computes run time as defined
as the delta between the initial attempt's start time and the last
attempt's finish time.
NOTE: Some jobs do not have any recorded attempts, and some attempts
have missing start and/or end times. A job's latest attempt having no
end time indicates that the job was still running when the log data
was collected.
Args:
status: One of 'Pass', 'Killed', 'Failed'.
vc: The hash of the virtual cluster id the job was run in.
jobid: The hash of the job id.
attempts: A list of dicts, where each dict contains the following keys:
'start_time': The start time of the attempt.
'end_time': The end time of the attempt.
'detail': A list of nested dicts where each dict contains
the following keys:
'ip': The server id.
'gpus': A list of the GPU ids allotted for this attempt.
submitted_time: The time the job was submitted to the queue.
user: The user's id.
"""
self._status = status
self._vc = vc
self._jobid = jobid
for attempt in attempts:
attempt['start_time'] = parse_date(attempt['start_time'])
attempt['end_time'] = parse_date(attempt['end_time'])
self._attempts = attempts
self._submitted_time = parse_date(submitted_time)
self._user = user
if len(self._attempts) == 0:
self._num_gpus = None
self._run_time = None
self._queueing_delay = None
else:
self._num_gpus = sum([len(detail['gpus']) for detail in self._attempts[0]['detail']])
if self._attempts[0]['start_time'] is None:
self._run_time = None
self._queueing_delay = None
else:
if self._attempts[-1]['end_time'] is None:
self._run_time = None
else:
self._run_time = \
timedelta_to_minutes(self._attempts[-1]['end_time'] -
self._attempts[0]['start_time'])
self._queueing_delay = \
timedelta_to_minutes(self._attempts[0]['start_time'] -
self._submitted_time)
@property
def status(self):
return self._status
@property
def vc(self):
return self._vc
@property
def jobid(self):
return self._jobid
@property
def attempts(self):
return self._attempts
@property
def submitted_time(self):
return self._submitted_time
@property
def user(self):
return self._user
@property
def num_gpus(self):
return self._num_gpus
@property
def queueing_delay(self):
return self._queueing_delay
@property
def run_time(self):
return self._run_time
def get_bucket_from_num_gpus(num_gpus):
"""Maps GPU count to a bucket for plotting purposes."""
if num_gpus is None:
return None
elif num_gpus == 1:
return 0
elif num_gpus >= 2 and num_gpus <= 4:
return 1
elif num_gpus >= 5 and num_gpus <= 8:
return 2
elif num_gpus > 8:
return 3
else:
return None
def get_plot_config_from_bucket(bucket):
"""Returns plotting configuration information."""
if bucket == 0:
return ('1', 'green', '-')
elif bucket == 1:
return ('2-4', 'blue', '-.')
elif bucket == 2:
return ('5-8', 'red', '--')
elif bucket == 3:
return ('>8', 'purple', ':')
```
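For quick orientation, a small usage sketch of the helpers above (assumes the functions defined in the previous cell have been run):
```
# Sanity-check the bucket mapping and the CDF helper.
for n in [1, 4, 8, 16]:
    bucket = get_bucket_from_num_gpus(n)
    label, color, linestyle = get_plot_config_from_bucket(bucket)
    print(n, '->', bucket, label, color, linestyle)

x, y = get_cdf([3.0, 1.0, 2.0, 4.0])
print(x, y)
```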
## Load the cluster log
```
cluster_job_log_path = os.path.join(LOGDIR, 'cluster_job_log')
with open(cluster_job_log_path, 'r') as f:
cluster_job_log = json.load(f)
jobs = [Job(**job) for job in cluster_job_log]
print(jobs[16].attempts)
for i in range(len(jobs)):
if jobs[i].jobid == "application_1506638472019_10238":
print(i)
print(jobs[i].attempts)
print(jobs[i].jobid)
print(jobs[i].vc)
print(jobs[i].user)
print(jobs[i].submitted_time)
print(jobs[i].queueing_delay)
print(jobs[i].run_time)
print(type(jobs[i].num_gpus))
break
```
# Job Runtimes (Figure 2)
```
run_times = {}
for job in jobs:
num_gpus = job.num_gpus
bucket = get_bucket_from_num_gpus(num_gpus)
if bucket is None:
continue
if bucket not in run_times:
run_times[bucket] = []
run_time = job.run_time
if run_time is not None:
run_times[bucket].append(run_time)
buckets = sorted([bucket for bucket in run_times])
for bucket in buckets:
num_gpus, color, linestyle = get_plot_config_from_bucket(bucket)
x, y = get_cdf(run_times[bucket])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.legend(loc='lower right')
plt.xscale('log')
plt.xlim(10 ** -1, 10 ** 4)
plt.ylim(0, 100)
plt.xlabel('Time (min)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
plt.show()
```
# Queueing Delay (Figure 3)
```
queueing_delays = {}
for job in jobs:
vc = job.vc
queueing_delays.setdefault(vc, {})
bucket = get_bucket_from_num_gpus(job.num_gpus)
if bucket is None:
continue
queueing_delays[vc].setdefault(bucket, [])
# NOTE: Each period between the job being placed on the queue
# and being scheduled on a machine is recorded as an individual
# queueing delay.
queueing_delay = 0.0
queue_time = job.submitted_time
for attempt in job.attempts:
start_time = attempt['start_time']
if queue_time is not None and start_time is not None:
queueing_delay = timedelta_to_minutes(start_time - queue_time)
queue_time = attempt['end_time']
queueing_delays[vc][bucket].append(queueing_delay)
for vc in queueing_delays:
for bucket in queueing_delays[vc]:
        # Materialize the filter object (in Python 3, filter() returns a lazy iterator)
        queueing_delays[vc][bucket] = list(filter(None, queueing_delays[vc][bucket]))
?filter
from collections.abc import Iterable
from collections.abc import Iterator
import math
def is_sqr(x):
return math.sqrt(x) % 1 == 0
newlist = filter(is_sqr, range(1, 101))
print(isinstance(newlist, Iterable))
print(isinstance(newlist, Iterator))
print(newlist.__next__)
print(newlist.__iter__)
vcs = queueing_delays.keys()
for i, vc in enumerate(vcs):
for bucket in queueing_delays[vc]:
num_gpus, color, linestyle = get_plot_config_from_bucket(bucket)
x, y = get_cdf(queueing_delays[vc][bucket])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.title('VC %s' % (vc))
plt.legend(loc='lower right')
plt.xscale('log')
plt.ylim(0, 100)
plt.xlim(10 ** -1, 10 ** 4)
plt.xlabel('Time (min)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
if i < len(vcs) - 1:
plt.figure()
plt.show()
```
# Locality Constraints (Figure 4)
```
data = {}
for i, job in enumerate(jobs):
if len(job.attempts) == 0:
continue
num_gpus = job.num_gpus
if num_gpus < 5:
continue
bucket = get_bucket_from_num_gpus(num_gpus)
data.setdefault(bucket, {
'x': [],
'y': []
})
queueing_delay = job.queueing_delay # only job.attempts[0]['start_time'] - job._submitted_time
num_servers = len(job.attempts[0]['detail'])
data[bucket]['x'].append(queueing_delay)
data[bucket]['y'].append(num_servers)
for bucket in data:
num_gpus, _, _ = get_plot_config_from_bucket(bucket)
if bucket == 2:
marker = '+'
facecolors = 'black'
edgecolors = 'none'
else:
marker = 'o'
facecolors = 'none'
edgecolors = 'red'
plt.scatter(data[bucket]['x'], data[bucket]['y'], label='%s GPU' % (num_gpus),
marker=marker, facecolors=facecolors, edgecolors=edgecolors)
plt.legend()
plt.xscale('log')
plt.xlabel('Time (min)')
plt.ylabel('Num. Servers')
plt.show()
```
# GPU Utilization (Figures 5, 6)
```
gpu_util_path = os.path.join(LOGDIR, 'cluster_gpu_util')
gpu_util = {}
with open(gpu_util_path, 'r') as f:
reader = csv.reader(f)
reader.__next__()
for row in reader:
time = row[0][:-4] # Remove the timezone
machineId = row[1]
gpu_util.setdefault(machineId, {})
gpu_util[machineId][time] = row[2:-1] # Ignore extra empty string at the end
def get_utilization_data(jobs, only_large_jobs=False, only_dedicated_servers=False):
"""Aggregates GPU utilization data for a set of jobs.
Args:
jobs: A list of Jobs.
only_large_jobs: If True, only considers jobs of size 8 or 16 GPUs.
Otherwise, considers jobs of size 1, 4, 8, or 16 GPUs.
only_dedicated_servers: If True, only considers jobs that use all GPUs
available on a server(s).
Returns:
A dict indexed by 1) job completion status, 2) number of GPUs requested
by the job, and 3) timestamp. The value of each nested dict is a list of
percentages indicating the utilization of each individual GPU on the
servers used by the job at the particular time requested.
"""
data = {}
for job in jobs:
num_gpus = job.num_gpus
if (len(job.attempts) == 0 or
(num_gpus != 1 and num_gpus != 4 and num_gpus != 8 and num_gpus != 16)):
continue
if only_large_jobs and num_gpus < 8:
continue
status = job.status
if status not in data:
data[status] = {}
if num_gpus not in data[status]:
data[status][num_gpus] = []
for attempt in job.attempts:
if only_dedicated_servers and len(attempt['detail']) > (num_gpus / 8):
continue
current_time = attempt['start_time']
if current_time is None or attempt['end_time'] is None:
continue
current_minute = round_to_nearest_minute(current_time)
while current_minute < attempt['end_time']:
current_minute_str = str(current_minute)
for detail in attempt['detail']:
machineId = detail['ip']
if current_minute_str in gpu_util[machineId]:
for gpu_id in detail['gpus']:
gpu_num = int(gpu_id[3:]) # Remove the 'gpu' prefix
try:
u = gpu_util[machineId][current_minute_str][gpu_num]
if u != 'NA':
data[status][num_gpus].append(float(u))
except Exception as e:
print(gpu_util[machineId][current_minute_str])
print(gpu_num)
raise ValueError(e)
current_minute = add_minute(current_minute)
return data
data = get_utilization_data(jobs)
statuses = data.keys()
for i, status in enumerate(statuses):
all_num_gpus = sorted(data[status].keys())
for num_gpus in all_num_gpus:
if num_gpus == 1:
color = 'green'
linestyle = '-'
elif num_gpus == 4:
color = 'blue'
linestyle = '-.'
elif num_gpus == 8:
color = 'red'
linestyle = '--'
elif num_gpus == 16:
color = 'cyan'
linestyle = ':'
x, y = get_cdf(data[status][num_gpus])
plt.plot(x, y, label='%s GPU' % (num_gpus), color=color, linestyle=linestyle)
plt.title(status)
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
if i < len(statuses) - 1:
plt.figure()
plt.show()
data = get_utilization_data(jobs, only_large_jobs=True, only_dedicated_servers=True)
aggregate_data = {}
for status in data:
for num_gpus in data[status]:
if num_gpus not in aggregate_data:
aggregate_data[num_gpus] = []
aggregate_data[num_gpus] += data[status][num_gpus]
all_num_gpus = sorted(aggregate_data.keys())
for num_gpus in all_num_gpus:
if num_gpus == 8:
linestyle = '-'
elif num_gpus == 16:
linestyle = '-.'
x, y = get_cdf(aggregate_data[num_gpus])
plt.plot(x, y, label='%s GPU' % (num_gpus), color='black', linestyle=linestyle)
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.grid(alpha=.3, linestyle='--')
plt.show()
```
# Host Resource Utilization (Figure 7)
```
mem_util_path = os.path.join(LOGDIR, 'cluster_mem_util')
mem_util = []
with open(mem_util_path, 'r') as f:
reader = csv.reader(f)
    reader.__next__() # Skip the header row
for row in reader:
if row[2] == 'NA':
continue
mem_total = float(row[2])
mem_free = float(row[3])
if mem_total == 0:
continue
mem_util.append(100.0 * (mem_total - mem_free) / mem_total)
cpu_util_path = os.path.join(LOGDIR, 'cluster_cpu_util')
cpu_util = []
with open(cpu_util_path, 'r') as f:
reader = csv.reader(f)
    reader.__next__() # Skip the header row
for row in reader:
if row[2] == 'NA':
continue
cpu_util.append(float(row[2]))
x, y = get_cdf(cpu_util)
plt.plot(x, y, label='CPU', color='black', linestyle='-')
x, y = get_cdf(mem_util)
plt.plot(x, y, label='Memory', color='black', linestyle='-.')
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.legend(loc='lower right')
plt.xlabel('Utilization (%)')
plt.ylabel('CDF')
plt.show()
machine_list_path = os.path.join(LOGDIR, 'cluster_machine_list')
with open(machine_list_path, 'r') as f:
reader = csv.reader(f)
reader.__next__()
for row in reader:
        if row[1] != '8': # Print machines whose GPU count differs from 8
print(row)
```
# Connections between MKs
## MK = Member of the Knesset (חבר כנסת)
Based on transcripts of the Knesset committees.<br/>
The work was done at the 'Public Knowledge Workshop' hackathon and won the 3rd place prize.
```
import pandas as pd
import networkx as nx
import matplotlib
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
%matplotlib inline
plt.rcParams["figure.figsize"] = (15,15)
G = nx.read_gml('./Mk_connections_graph')
G.remove_node('האירק')
# Reversing the MKs' names for display, since Hebrew is printed reversed :)
def reverse(some_list):
return [item[::-1] for item in some_list]
def print_reversed(dictionary):
return [(key[::-1] , value) for (key, value) in sorted(dictionary.items() ,key=lambda x: x[1] ,reverse=True)]
heads_of_committees = set(['מיקי זוהר', 'משה גפני', 'איתן כבל'
,'אבי דיכטר', 'יואב קיש', 'דוד ביטן', 'צחי הנגבי',
'דוד אמסלם', 'ניסן סלומינסקי', 'אברהם נגוסה', 'יעקב מרגי',
'אלי אלאלוף' ,'שלי יחימוביץ' ,'עאידה תומא סלימאן',
'קארין אלהרר', 'אורי מקלב', 'יצחק וקנין', 'איל בן ראובן'])
# The degree listings use reversed names, so reverse the committee heads' names as well
heads_of_committees_reversed = set(reverse(list(heads_of_committees)))
def without_committee_heads(mks_list):
    return [(mk, d) for (mk, d) in mks_list if mk not in heads_of_committees_reversed]
pos = nx.spring_layout(G, k=1, scale=10000)
def draw_graph(G, arrows=False):
nx.draw_networkx_nodes(G, pos)
nx.draw_networkx_labels(G, pos, font_size=16)
weights = set([])
for n1,n2 in G.edges():
weight = G.get_edge_data(n1,n2)['weight']
weights.add(weight)
for weight in weights:
edges_with_weight = [edge for edge in G.edges() if weight == G.get_edge_data(*edge)['weight']]
        nx.draw_networkx_edges(G, pos, edgelist=edges_with_weight, width=weight, arrows=arrows, edge_color='b', arrowstyle='->')
draw_graph(G)
```
# Degree of node:
```
def reverse(some_list):
return [item[::-1] for item in some_list]
def print_reversed(dictionary):
dictionary = dict(dictionary)
return [(key[::-1] , value) for (key, value) in sorted(dictionary.items() ,key=lambda x: x[1] ,reverse=True)]
```
## The MKs with the highest total degree
```
print_reversed(G.degree())[:10]
```
### We can see that these are mostly the heads of the committees, which makes sense... <br/>Let's remove them
```
without_committee_heads(print_reversed(G.degree()))[:10]
```
## The MKs with the highest in degree
The MKs that are spoken to (or about) the most
```
print_reversed(G.in_degree())[:10]
```
Without the heads of the committees
```
without_committee_heads(print_reversed(G.in_degree()))[:10]
```
## The MKs with the highest out degree
The MKs that speak to others the most
```
print_reversed(G.out_degree())[:10]
without_committee_heads(print_reversed(G.out_degree()))[:10]
```
## The MKs with the highest centrality
The most central MKs
```
print_reversed(nx.degree_centrality(G))[:10]
without_committee_heads(print_reversed(nx.degree_centrality(G)))[:10]
```
# Who did I talk to/about
```
def draw_out_subgraph_of_node(some_node, arrows=False):
some_node = some_node[::-1]
neighbors = list(G.neighbors(some_node))
neighbors.append(some_node)
draw_graph(G.subgraph(neighbors), arrows)
draw_out_subgraph_of_node('ראש הממשלה בנימין נתניהו')
draw_out_subgraph_of_node('בנימין נתניהו')
```
Benjamin Netanyahu doesn't attend the Knesset committees, and even when he does, he only talks to Yuli Edelstein.
# Who talked about me
```
def draw_in_subgraph_of_node(some_node, arrows=False):
some_node = some_node[::-1]
neighbors = list(G.to_undirected().neighbors(some_node))
neighbors.append(some_node)
draw_graph(G.subgraph(neighbors), arrows)
draw_in_subgraph_of_node('בנימין נתניהו')
```
A lot of people talk about Benjamin Netanyahu, even though the committees are supposed to be subject-based.
Just to name a few:
- Karin Elharrar chaired the State Control Committee, and Benjamin Netanyahu is suspected of fraud.
- Jamal Zahalka talks about Benjamin Netanyahu because he is not satisfied with the situation of Israeli Arabs.
```
from __future__ import print_function
import os
import pandas as pd
%matplotlib inline
from matplotlib import pyplot as plt
#Set current dir and work relative to it
os.chdir('../Data Files')
#Read dataset into a pandas.DataFrame
beer_df = pd.read_csv('quarterly-beer-production-in-aus-March 1956-June 1994.csv')
beer_df.index = beer_df['Quarter']
#Display shape of the dataset
print('Shape of the dataframe:', beer_df.shape)
#Show top 10 rows
beer_df.head(10)
#Rename the 2nd column
beer_df.rename(columns={'Quarterly beer production in Australia: megalitres. March 1956 ? June 1994':
'Beer_Prod'
},
inplace=True
)
#Remove missing values
missing = (pd.isnull(beer_df['Quarter'])) | (pd.isnull(beer_df['Beer_Prod']))
print('Number of rows with at least one missing values:', missing.sum())
beer_df = beer_df.loc[~missing, :]
print('Shape after removing missing values:', beer_df.shape)
#In order to remove seasonal patterns let us calculate 2X4 quarter moving average
MA4 = beer_df['Beer_Prod'].rolling(window=4).mean()
TwoXMA4 = MA4.rolling(window=2).mean()
TwoXMA4 = TwoXMA4.loc[~pd.isnull(TwoXMA4)]
#Let's plot the original time series and the seasonal moving averages
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
beer_df['Beer_Prod'].plot(ax=ax, color='b', linestyle='-')
TwoXMA4.plot(ax=ax, color='r', linestyle='-')
plt.xticks(rotation=60)
ax.set_title('Quarterly Beer Production in Australia and 2X4 quarter MA')
plt.savefig('plots/ch2/B07887_02_16.png', format='png', dpi=300)
#Let's compute the residuals after removing the trend
residuals = beer_df['Beer_Prod']-TwoXMA4
residuals = residuals.loc[~pd.isnull(residuals)]
#Let's plot the residuals
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
residuals.plot(ax=ax, color='b', linestyle='-')
plt.xticks(rotation=60)
ax.set_title('Residuals in Quarterly Beer Production time series')
plt.savefig('plots/ch2/B07887_02_17.png', format='png', dpi=300)
from pandas.plotting import autocorrelation_plot
#Let's plot the autocorrelation function of the residuals
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
autocorrelation_plot(residuals, ax=ax)
ax.set_title('ACF of Residuals in Quarterly Beer Production time series')
plt.savefig('plots/ch2/B07887_02_18.png', format='png', dpi=300)
autocorrelation_plot(residuals)
#Let's compute quarterly differencing to remove quarterly seasonality
residuals_qtr_diff = residuals.diff(4)
#Remove null values
residuals_qtr_diff = residuals_qtr_diff.loc[~pd.isnull(residuals_qtr_diff)]
#Let's plot the autocorrelation function of the residuals
fig = plt.figure(figsize=(5.5, 5.5))
ax = fig.add_subplot(1,1,1)
autocorrelation_plot(residuals_qtr_diff, ax=ax)
ax.set_title('ACF of Quarterly Differenced Residuals')
plt.savefig('plots/ch2/B07887_02_19.png', format='png', dpi=300)
```
# <span style="color: blue">Laboratorio 1: Gráficos</span> <a class="tocSkip">
### <span style="color: green">Prof. Andrés Arguedas - XS-0300 - II 2020 con colaboraciones de Andrea Vargas</span> <a class="tocSkip">
*Este laboratorio es parte del curso XS-0300 Estadística para Biología I de la Escuela de Estadística de la Universidad de Costa Rica, y se presenta con una licencia [CC-BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).*
**ATENCIÓN:** Antes de entregar este laboratorio, asegúrese de que todo corre como se espera. Para hacer esto, primero **reinicie el kernel**; esto lo puede hacer seleccionando **Kernel** en el menú y luego **Restart**. Habiendo reiniciado el kernel, **corra todas las celdas**; esto lo puede hacer seleccionando **Cell** en el menú y luego **Run All**.
Por último, recuerde rellenar cualquier lugar donde diga `ESCRIBA SU CÓDIGO AQUÍ` o "ESCRIBA SU RESPUESTA AQUI", así como escribir su nombre y carné a continuación:
```
NOMBRE = ""
CARNE = ""
```
---
## Preparación para este laboratorio
El presente laboratorio tiene como objetivos de aprendizaje:
1. Saber que es un archivo `.csv`
2. Conocer el paquete `readr` y como cargar archivos `.csv` a R
3. Entender la utilidad y forma de utilizar los comandos `mutate()` y `rename()` del paquete `dplyr`
4. Discernir la diferencia en métodos y motivación entre hacer gráficos en R base y con `ggplot2`
5. Crear gráficos de dispersión y de barras utilizando las capacidades de R
En este laboratorio utilizaremos el conjunto de datos de `aves`, junto con los paquetes `ggplot2`, `dplyr` y `readr`. Ya todos estos paquetes están descargados y disponibles en el entorno virtual, al igual que el conjunto de datos, por lo que no es necesario que descarguen o consigan ningún paquete o conjunto de datos adicional, todos ya están disponibles.
---
## Conjunto de datos `aves`
El conjunto de datos de `aves`, contenido en el archivo [`aves.csv`](https://raw.githubusercontent.com/andresarguedas/LabXS0300-content/master/data/aves.csv), contiene una serie de observaciones realizadas de dos especies de aves en cuatro localidades. Para todas estas aves se documentaron las siguientes variables:
- `Localidad`: La localidad en el cual se hizo la observación: **Cerro de la Muerte**, **Irazú**, **Barva** o **Poás**
- `Especie`: La especie a la cual pertenece el individuo: *Chlorospingus pileatus* o *Catharus gracilirostris*
- `Largo pico`: El largo del pico, medido en cm
- `Ala`: El largo del ala, medido en cm
- `Cola`: El largo de la cola, medido en cm
- `Tarso`: El largo del tarso, medido en cm
- `Juanete`: El largo del juanete, medido en cm
- `Alto pico`: La altura del pico, medido en cm
- `Ancho pico`: El ancho del pico, medido en cm
- `Peso`: El peso del ave, medido en g
En total, se tienen 97 observaciones distintas, la mayoría con datos en todas las variables, los cuales se usarán a lo largo de este laboratorio y de algunos de los siguientes laboratorios. Esta es una foto de un individuo de cada una de las especies:
### *Chlorospingus pileatus* <a class="tocSkip">
<p><a href="https://commons.wikimedia.org/wiki/File:Sooty-capped_Bush-tanager.jpg#/media/Archivo:Sooty-capped_Bush-tanager.jpg"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/1/1d/Sooty-capped_Bush-tanager.jpg/1200px-Sooty-capped_Bush-tanager.jpg" alt="Sooty-capped Bush-tanager.jpg"></a><br>De <a rel="nofollow" class="external text" href="https://www.flickr.com/photos/7457894@N04">Jerry Oldenettel</a> - originalmente subida a <a href="//commons.wikimedia.org/wiki/Flickr" class="mw-redirect" title="Flickr">Flickr</a> como <a rel="nofollow" class="external text" href="https://www.flickr.com/photos/7457894@N04/2308450870">DSC_5075a.jpg</a>, <a href="https://creativecommons.org/licenses/by-sa/2.0" title="Creative Commons Attribution-Share Alike 2.0">CC BY-SA 2.0</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=3878999">Enlace</a></p>
### *Catharus gracilirostris* <a class="tocSkip">
<p><a href="https://commons.wikimedia.org/wiki/File:Black-billed_Nightingale-thrush.jpg#/media/Archivo:Black-billed_Nightingale-thrush.jpg"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/2/24/Black-billed_Nightingale-thrush.jpg/1200px-Black-billed_Nightingale-thrush.jpg" alt="Black-billed Nightingale-thrush.jpg"></a><br>De <a rel="nofollow" class="external text" href="https://www.flickr.com/photos/7457894@N04">Jerry Oldenettel</a> - originalmente subida a <a href="//commons.wikimedia.org/wiki/Flickr" class="mw-redirect" title="Flickr">Flickr</a> como <a rel="nofollow" class="external text" href="https://www.flickr.com/photos/7457894@N04/2307608219">DSC_5055a.jpg</a>, <a href="https://creativecommons.org/licenses/by-sa/2.0" title="Creative Commons Attribution-Share Alike 2.0">CC BY-SA 2.0</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=3882594">Enlace</a></p>
Ya sabemos en que consiste el conjunto de datos con el que trabajaremos, ¿pero cómo hacemos para cargarlo en R? Esto lo veremos en la próxima sección.
---
## Archivos `.csv` y el paquete `readr`
Hasta el momento, solamente hemos trabajado con datos en R que nosotros mismos hemos guardado en memoria mediante la función de asignación `<-`. Pero, ¿qué pasa si queremos cargar datos que ya hemos recolectado y tenemos guardados en la computadora? Aunque R por default tiene soporte para importar y leer datos en diversos formatos, hay muchos paquetes que ofrecen mejorías sobre las funciones básicas de importación de datos y agregan nuevos formatos soportados. En este laboratorio, nos vamos a enfocar principalmente en un tipo específico de archivos, llamados csv, y utilizaremos un paquete en específico, `readr`, para lograr cargarlos a R.
### ¿Qué es un archivo `.csv`?
Un archivo `.csv` es un tipo de archivo donde los datos se separan en columnas mediante comas (**C**omma **S**eparated **V**alues en inglés). Actualmente, un archivo csv no necesita estar separado por comas estrictamente, sino que puede estar separado por espacios, tabulaciones o punto y comas; lo importante es que haya alguna forma de delimitar las columnas unas de otras. La forma más sencilla de pensar en un archivo csv es como un archivo de Excel con solamente los datos separados en columnas y filas. Un ejemplo ilustrativo de un posible archivo csv se presenta a continuación:
```
x, y, z
1, 2, 3
5, 6, 7
```
Como se puede ver, por lo general la primera línea contiene los nombres de las variables, separados mediante comas, de forma que la computadora entiende que la primera columna se llama `x`, luego `y` y, por último, `z`. Como después de la `z` no hay una coma, la computadora entiende que ya no hay más columnas, por lo que tiene que pasar a la siguiente fila. En la siguiente fila el primer valor es un `1` antes de la coma, por lo que el primer valor de la primer columna (`x`) va a ser 1, y así con todas las demás columnas y filas del archivo. En conclusión, otra forma de ver el anterior archivo csv es como se presenta en la siguiente tabla:
|x|y|z|
|-|-|-|
|1|2|3|
|5|6|7|
Para que la computadora pueda entender cómo leer los datos es muy importante especificar siempre cómo están divididos, es decir, cuál separador se está usando.
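A modo de ilustración, estas son algunas funciones básicas de R para leer archivos con distintos separadores (es solo un esquema mínimo; los nombres `datos`, `ejemplo.csv` y `ejemplo.txt` son hipotéticos). Más adelante usaremos el paquete `readr`, que ofrece funciones equivalentes.
```
# Archivo separado por comas
datos <- read.csv("ejemplo.csv")
# Archivo separado por punto y coma (y decimales con coma)
datos <- read.csv2("ejemplo.csv")
# Archivo separado por tabulaciones
datos <- read.delim("ejemplo.txt")
# Cualquier otro separador se puede indicar con el argumento sep
datos <- read.table("ejemplo.txt", header = TRUE, sep = ";")
```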
### El paquete `readr`
Aunque R puede leer nativamente archivos csv mediante la función `read.csv`, el paquete [`readr`](https://readr.tidyverse.org), el cual es parte del `tidyverse`, contiene una serie de mejoras y facilidades para poder importar y trabajar con archivos csv, entre otros. Pueden consultar un cheat sheet con comandos de `readr`, en inglés, en el [siguiente enlace](https://rawgit.com/rstudio/cheatsheets/master/data-import.pdf).
La función que más usaremos del paquete `readr` es `read_csv`, que se puede utilizar de la siguiente forma:
```{r}
library(readr)
# Recuerden que siempre ocupamos cargar
# el paquete antes de usar una función de dicho paquete!
objeto <- read_csv("datos.csv")
```
En el ejemplo anterior, tenemos los siguientes elementos:
- `objeto` es el nombre en memoria que le asignaremos al conjunto de datos que estamos cargando desde la computadora
- `read_csv()` es la función que usaremos para leer los datos de la computadora a R
- `"datos.csv"` es el nombre del conjunto de datos en la computadora que queremos cargar a R
¡Y eso es todo! Hay otros argumentos que veremos más adelante que podemos usar para especificar el tipo de variables que estamos cargando en cada columna, o posibles valores faltantes, llamados `NA`'s, pero por ahora esto es todo lo que ocupamos saber para cargar conjuntos de datos a R.
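Solo como referencia, así se verían, en forma de esquema, esos argumentos adicionales de `read_csv()`; los tipos y valores usados aquí son un supuesto ilustrativo, no los definitivos del laboratorio:
```
library(readr)
datos <- read_csv("datos.csv",
                  col_types = cols(
                    Especie = col_factor(), # columna categórica
                    Peso = col_double()     # columna numérica
                  ),
                  na = c("", "NA", "None")) # valores que se tratarán como faltantes
```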
### Cargando el conjunto de datos `aves`
Como se describió en la sección 2, el conjunto de datos de aves contiene una serie de mediciones de aves de dos especies: *Chlorospingus pileatus* y *Catharus gracilirostris*, en cuatro lugares distintos: Cerro de la Muerte, Irazú, Barva y Poás. En el caso de los conjuntos de datos para los laboratorios, estos ya están disponibles en línea, de forma que no es necesario que los descarguen ni que hagan ningún procedimiento adicional, sino que por default estarán en un fólder llamado `data`. Por lo tanto, el conjunto de datos `aves.csv` estará disponible si lo llamamos como `data/aves.csv`. Así, para cargar este conjunto de datos en R, con el nombre en memoria de `aves`, podemos hacer lo siguiente:
```
library(readr)
aves <- read_csv("data/aves.csv")
```
Esto lo podemos hacer directamente en RStudio desde la interfaz gráfica utilizando el siguiente procedimiento:

Primero, es importante resaltar que ocupamos correr el comando `library(readr)` para poder cargar el paquete `readr` en R y utilizar la función `read_csv` para cargar los datos. **Recuerden que siempre ocupamos cargar los paquetes antes de poder utilizar las funciones que vienen en ellos.** Una vez que hayan corrido el bloque anterior, les debería salir un mensaje como el siguiente:
```{r}
Parsed with column specification:
cols(
Localidad = col_double(),
Especie = col_character(),
`Largo pico` = col_double(),
Ala = col_double(),
Cola = col_double(),
Tarso = col_double(),
Juanete = col_double(),
`Alto pico` = col_double(),
`Ancho pico` = col_double(),
Peso = col_double()
)
```
Esto se debe a que no estamos especificando el nivel de medición de cada variable, por lo que `readr` tiene que hacer suposiciones que, aunque por lo general son correctas, pueden estar equivocadas. De forma general, las especificaciones de las columnas más usuales son las siguientes:
- `col_logical()`: valores lógicos, es decir, `T`, `F`, `TRUE`, `FALSE`
- `col_double()`: cualquier tipo de valor numérico
- `col_integer()`: números enteros, es decir, sin decimales. Se prefiere utilizar `col_double()` en la mayoría de los casos
- `col_factor()`: variable factor con una serie fija de valores que puede tomar
- `col_character()`: la información se devuelve como un string a R
Como mencionamos anteriormente, `readr` busca "adivinar" el tipo de variable con base en ciertos lineamientos y lo más usual es que termine con variables tipo `col_double()` y `col_character()`. Aunque podemos especificar directamente el tipo de variable de cada columna a la función `read_csv`, por ahora vamos a trabajar con los defaults que nos tira. Veamos los datos de `aves` que acabamos de cargar en R:
```
aves
```
Como se puede ver, en la parte de arriba de la salida anterior nos dice *A spec_tbl_df: 97 x 10*. En este caso *spec_tbl_df* nos está diciendo que `aves` es un **data frame**, mientras que *97 x 10* nos indica que el conjunto de datos `aves` tiene **97 observaciones en 10 columnas**. La primera fila de la tabla anterior contiene los nombres de las variables, que son: `Localidad`, `Especie`, `Largo pico`, `Ala`, `Cola`, `Tarso`, `Juanete`, `Alto pico`, `Ancho pico` y `Peso`. Aquí nos encontramos con el primer problema con los datos que acabamos de importar: **los nombres de las variables no deberían tener espacios**. Esto se debe a que, al igual que con los nombres de las variables, R considera un espacio en un nombre como dos objetos distintos, por lo que ocupamos cambiar el nombre de las variables `Largo pico`, `Alto pico` y `Ancho pico` para que no tengan espacios.
Por otro lado, si vemos la segunda fila del resultado anterior, esta nos dice el tipo de variable con el que estamos trabajando; en este caso **`<dbl>`** para una numérica y **`<chr>`** para una de caracter. Aquí nos encontramos con el segundo problema con los datos que acabamos de importar: **las variables numéricas deben ser `<dbl>` mientras que las cualitativas, por lo general, deberían ser factores o `<fct>`.** Esto lo podemos solucionar cambiando tanto `Localidad` como `Especie` a factores, ya que todas las variables numéricas ya son numéricas en R.
Por último, el resultado también nos muestra *algunos* (léase no todos) los datos que están en `aves`. Esto nos puede ayudar para hacernos una idea general sobre el comportamiento de los datos pero es muy difícil llegar a conclusiones solamente viendo los datos sueltos, que es por lo que queremos hacer gráficos. Además hay algunos datos que aparecen con `NA`, como por ejemplo en la variable `Peso`, lo que significa que no se tiene información para esa variable para ese individuo. Por ahora, los gráficos que hacemos por default eliminan estos `NA`'s, o valores perdidos, pero en los próximos laboratorios veremos como trabajar con estos datos.
En resumen, tenemos dos problemas con el conjunto de datos `aves` que ocupamos solucionar:
- [ ] Cambiar el nombre de las variables `Largo pico`, `Alto pico` y `Ancho pico` para que no tengan espacios
- [ ] Cambiar las variables `Localidad` y `Especie` para que sean factores
En la próxima sección veremos como solucionar estos problemas.
---
## Manipulando conjuntos de datos en R con `dplyr`
En el laboratorio anterior mencionamos el paquete `dplyr` junto con el operador pipe (`%>%`), pero hay mucho más que este paquete puede hacer para facilitarnos el trabajo de manipular y trabajar con conjuntos de datos en R. Por lo tanto, el objetivo de esta sección es presentar algunas de las funciones principales de `dplyr` que nos van a ayudar a lo largo del curso. Para empezar, vamos a cargar el paquete `dplyr` para poder utilizar todas las funciones que veremos a continuación:
```
library(dplyr)
```
### El comando `mutate()`
Cuando queramos modificar una columna, o crear columnas nuevas, en un conjunto de datos podemos usar el comando `mutate()` del paquete `dplyr` para lograrlo. La sintaxis para usar esta función es la siguiente:
`mutate(datos, Col_nueva = fun(Col))`
donde `datos` es el nombre del conjunto de datos que queremos modificar, `Col_nueva` es el nombre de la nueva columna que queremos crear, `fun()` es una función que queremos aplicar a la columna original para transformarla y `Col` es la columna original. De igual forma, podríamos modificar una misma columna cambiando `Col_nueva` por `Col`.
---
**Ejemplo:** Vamos a crear una nueva variable en el conjunto de datos `aves`, llamada `Peso_cuadrado` que sea el peso del ave elevado al cuadrado, usando el comando `mutate()`. Esto lo podemos hacer mediante el siguiente comando:
```
mutate(aves, Peso_cuadrado = Peso^2)
```
Como podemos ver, ahora tenemos una nueva variable, llamada `Peso_cuadrado` que contiene el peso de las aves, elevado al cuadrado.
---
Usando un pipe `%>%` podemos hacer el comando anterior de forma incluso más sencilla, utilizando la siguiente sintaxis:
`datos %>% mutate(Col_nueva = fun(Col))`
Como podemos ver, lo único que cambió es que ahora `datos` está a la izquierda del pipe y no dentro del `mutate()`, ya que el pipe lo está introduciendo ahí automáticamente.
---
**Ejemplo:** Podemos agregar la columna `Peso_cuadrado` de forma más sencilla usando un pipe:
```
aves %>% mutate(Peso_cuadrado = Peso^2)
```
---
Adicionalmente, podemos manipular varias columnas al mismo tiempo separando con una `,` dentro del comando `mutate()`, de la siguiente forma:
`datos %>% mutate(Col_nueva1 = fun1(Col1), Col_nueva2 = fun2(Col2), ...)`
Esto nos permite ahorrar tiempo y espacio, ya que podemos convertir todas las variables al mismo tiempo. Por lo tanto, si quisiéramos convertir las variables `Especie` y `Localidad` en factores, podríamos hacer lo siguiente:
```
aves %>% mutate(Especie = factor(Especie),
Localidad = factor(Localidad))
```
Aunque los datos parecen no haber cambiado, si nos fijamos debajo de **Localidad** y **Especie** en la tabla anterior, ahora estos presentan **`<fct>`** como debería ser, en lugar de **`<dbl>`** y **`<chr>`**. Por lo tanto, con la ayuda del comando `mutate()` pudimos solucionar uno de nuestros problemas:
- [ ] Cambiar el nombre de las variables `Largo pico`, `Alto pico` y `Ancho pico` para que no tengan espacios
- [X] Cambiar las variables `Localidad` y `Especie` para que sean factores
### El comando `rename()`
Cuando queramos cambiar el nombre de alguna variable en un conjunto de datos, podemos hacer uso de la función `rename()` del paquete `dplyr`. La sintaxis de esta función es:
`rename(datos, Nuevo_nombre = Col)`
o usando pipes:
`datos %>% rename(Nuevo_nombre = Col)`
donde `datos` es el conjunto de datos, `Col` es la columna a la cual queremos cambiar el nombre y `Nuevo_nombre` es el nuevo nombre que le queremos poner a dicha columna. En este caso `rename()` no crea una nueva columna, sino que simplemente cambia el nombre de la columna, pero no toca los datos de fondo, como sí lo hace `mutate()`. De igual forma que `mutate()`, eso sí, nos permite cambiar el nombre de múltiples columnas al mismo tiempo, separándolas mediante `,`.
Por lo tanto, podemos cambiar el nombre de las variables `Largo pico`, `Alto pico` y `Ancho pico` de nuestro conjunto de datos, usando `rename()`, de la siguiente forma:
```
aves %>% rename(Largo_pico = `Largo pico`,
Alto_pico = `Alto pico`,
Ancho_pico = `Ancho pico`)
```
Como podemos ver, ahora las columnas `Largo pico`, `Alto pico` y `Ancho pico` se llaman `Largo_pico`, `Alto_pico` y `Ancho_pico`, respectivamente. Por lo general, cuando se quieren quitar espacios de nombres de variables o de columnas, los sustituimos por `_` o `.`. Lo último por resaltar es que cuando una columna tiene un nombre con espacios, necesitamos escribirla rodeada por ``` `` ```, para que el espacio no sea problemático.
Por lo tanto, utilizando el comando `rename()` logramos solucionar uno de nuestros problemas:
- [X] Cambiar el nombre de las variables `Largo pico`, `Alto pico` y `Ancho pico` para que no tengan espacios
- [X] Cambiar las variables `Localidad` y `Especie` para que sean factores
Verifiquemos que todo está como lo queremos en `aves`:
```
aves
```
**¿Qué pasó?** Parece que no hicimos ningún cambio en `aves`: las variables siguen teniendo espacios y `Localidad` y `Especie` siguen sin ser factores, como antes, ¿pero por qué? Recordemos que, como todo en R, si queremos guardar un resultado que obtuvimos, lo necesitamos guardar mediante el comando de asignación `<-`, por lo que todo lo que hicimos con `rename()` y `mutate()` no quedó guardado en R; pero hay una forma muy sencilla de solucionar esto:
```
aves <- aves %>% mutate(Especie = factor(Especie),
Localidad = factor(Localidad)) %>%
rename(Largo_pico = `Largo pico`,
Alto_pico = `Alto pico`,
Ancho_pico = `Ancho pico`)
```
Como podemos ver, es cuestión de agregar un `aves <-` antes de los comandos que hicimos y R guardará los cambios realizados sobre el conjunto de datos `aves`. Otra cosa importante a tomar en cuenta es que podemos encadenar los pipes, de tal forma que primero aplicamos un `mutate()` y luego un `rename()` sin ningún problema. Ahora sí, verifiquemos que los cambios se guardaron:
```
aves
```
Parece que ahora sí se guardaron los cambios y ya no tenemos ningún problema con el conjunto de datos `aves`, por lo que podemos empezar a hacer gráficos, que es a lo que nos dedicaremos en la próxima sección.
---
### Ejercicios (2pts.)
Con base en el conjunto de datos `aves` y utilizando las funciones vistas anteriormente, agregue dos nuevas columnas a `aves`, con las siguientes especificaciones:
- `razon`: La razón del tamaño del ala del ave con respecto a la longitud de su cola, es decir dividir el largo del ala entre el largo de la cola
- `tarso_mm`: El tamaño del tarso, medido en mm, es decir multiplicar el tamaño del tarso por 10
```
# ESCRIBA SU CÓDIGO AQUÍ
testthat::fail() # No respuesta - elimine esta línea si escribió una respuesta
```
Corra las siguientes celdas para ver su respuesta y verifique que sea correcta:
```
aves$razon
aves$tarso_mm
```
---
## Gráficos en R (base y ggplot)
Una de las principales utilidades y ventajas de R es que permite hacer representaciones gráficas de muy alta calidad y con un alto grado de personalización. R trae, en sus paquetes por default, un sistema de graficación denominado **base** o **R base**, pero además hay muchos paquetes que permiten realizar gráficos de distintas formas; nosotros nos enfocaremos en uno específicamente: [`ggplot2`](https://ggplot2.tidyverse.org), aunque paquetes como `lattice` también se pueden usar. En este laboratorio vamos a hacer gráficos tanto con R base como con ggplot para poder hacer comparaciones entre ambos métodos y determinar cual es más sencillo de utilizar en ciertos casos.
Siempre que utilicemos ggplot para hacer gráficos, ocupamos cargar el paquete `ggplot2`, o si no, no podremos correr los comandos, por lo que vamos a proceder a cargar este paquete:
```
library(ggplot2)
```
### Gráficos de dispersión
Los gráficos de dispersión son muy usados y permiten ver la relación que existe entre dos variables numéricas, poniendo variables numéricas en los ejes *X* y *Y* y "pintando" los pares ordenados de cada observación para esas dos variables. Veamos un ejemplo donde comparamos `Ancho_pico` con `Alto_pico`.
#### R base
Hacer un gráfico de dispersión utilizando R base es cuestión de utilizar la función `plot()` de la siguiente forma:
`plot(datos$varx, datos$vary)`
donde `datos` es el conjunto de datos a utilizar, además `varx` y `vary` son variables numéricas de `datos`. Esto nos dará como resultado un gráfico de dispersión donde la variable `varx` estará en el eje *X* y la variable `vary` en el eje Y. Por lo tanto, se puede hacer el gráfico de dispersión de `Ancho_pico` con `Alto_pico` mediante el siguiente comando:
```
plot(aves$Ancho_pico, aves$Alto_pico)
```
¡Listo! Eso es todo lo que ocupamos hacer para hacer un gráfico de dispersión con R base. Hay otras opciones que podemos usar, de las cuales veremos algunas a continuación:
- `pch`: un número que define el tipo de punto a utilizar. Por default es 1, pero 16 también es muy utilizado
- `xlab` y `ylab`: nombres del eje *X* y *Y*, respectivamente. Debemos proporcionarlo entre comillas `""`
- `main`: nombre del título. Debemos proporcionarlo entre comillas `""`
Todos estos argumentos se separan mediante `,` dentro del comando `plot()`. Por lo que podríamos refinar el gráfico de la siguiente forma:
```
plot(aves$Ancho_pico, aves$Alto_pico, pch = 16,
xlab = "Ancho pico", ylab = "Alto pico",
main = "Alto y ancho del pico de las aves")
```
#### `ggplot2`
Hacer un gráfico con ggplot toma más comandos que con R base pero también es bastante sencillo. Lo primero que ocupamos hacer es especificar cual variable estará en el eje *X* y cual en el eje *Y*, además del conjunto de datos que usaremos. Esto lo podemos hacer mediante la función `ggplot()` **(OJO: El paquete se llama `ggplot2` pero la función se llama `ggplot()`)**
`ggplot(datos, aes(x = varx, y = vary))`
donde `datos` es el conjunto de datos a utilizar, además `varx` y `vary` son variables numéricas de `datos`. En este caso, el primer argumento de `ggplot()` es el conjunto de datos que vamos a usar, mientras que dentro de `aes()` especificamos todas las variables a utilizar, junto con otras características que veremos más adelante. Lo más importante a resaltar es que, si solamente corremos el comando presentado anteriormente, no tendremos el gráfico de dispersión, como sí lo teníamos mediante `plot()`, sino que simplemente crearemos un lienzo, como se ve a continuación:
```
ggplot(aves, aes(x = Ancho_pico, y = Alto_pico))
```
We can see that `Ancho_pico` is on the *X* axis and `Alto_pico` is on the *Y* axis, but where are the points? This is because the way of thinking behind ggplot differs from the philosophy of base R: base R builds the whole plot at once, with a single command, whereas ggplot adds the pieces of the plot we ask for, one at a time and in order. The advantage is a greater level of customization and higher-quality graphics; the disadvantage is that we need to know in advance what kind of plot we want; **ggplot cannot guess what we want, unlike base R, which at least tries to.**
Therefore, if we want to add new graphical elements, we do so by putting a `+` at the end of each line that is followed by another element. In our case, we can add points to a plot with the `geom_point()` command:
```
ggplot(aves, aes(x = Ancho_pico, y = Alto_pico)) +
geom_point()
```
*Voilà!* We have a scatter plot just like the one from `plot()`. Note that we needed a `+` after `ggplot()`, because we wanted to add points to the canvas, but not after `geom_point()`, since we are not adding any further graphical element.
It is worth noting that both `ggplot2` and `dplyr` are part of the `tidyverse`, so both support pipes, which means we can build the previous plot more simply using a pipe:
```
aves %>% ggplot(aes(x = Ancho_pico, y = Alto_pico)) +
geom_point()
```
Now, to change the axis labels we can use the `labs()` function, whose arguments `title`, `x` and `y`, quite explicitly, set the title and the names of the *X* and *Y* axes, respectively. As with `plot()`, these names must go in quotes `""`, and we can add `labs()` to our plot by putting a `+` after `geom_point()`, as shown below:
```
aves %>% ggplot(aes(x = Ancho_pico, y = Alto_pico)) +
geom_point() +
labs(title = "Alto y ancho del pico de las aves",
x = "Ancho pico", y = "Alto pico")
```
As we can see, the resulting plot is essentially the same with base R and with ggplot, even though the appearance differs slightly. This will be true for every plot we make in this lab; some will simply be easier with one method than with the other.
---
#### Exercise (2 pts.)
a) Make a scatter plot relating the tail length and tarsus length of the birds in the data set, placing the variable `Cola` on the *X* axis and `Tarso` on the *Y* axis. **Remember the minimum guidelines required for presenting plots.**
```
# WRITE YOUR CODE HERE
testthat::fail() # No answer - delete this line if you wrote an answer
```
b) What can you conclude about the relationship between the birds' tail length and tarsus length? Is there any unusual value in the data?
WRITE YOUR ANSWER HERE
---
### Scatter plots by group
One way to add a third, categorical, variable to a scatter plot is with color, so that the points are colored according to the category they belong to. This is, at heart, a scatter plot just like the ones we saw before; the only difference is that we add one new argument to each of the two previous functions. So let's compare the birds' bill height and bill width, separating them by species:
#### Base R
To color the points, we just add the `col` argument to the `plot()` function, specifying the variable of interest, and R adds it to the plot:
#### `ggplot2`
To color the points, we just add the `col` argument inside `aes()`, specifying the variable of interest, and ggplot adds it to the plot:
```
plot(aves$Ancho_pico, aves$Alto_pico, pch = 16,
col = aves$Especie,
xlab = "Ancho pico", ylab = "Alto pico",
main = "Alto y ancho del pico de las aves, separado según especie")
aves %>% ggplot(aes(x = Ancho_pico, y = Alto_pico,
col = Especie)) +
geom_point() +
labs(title = "Alto y ancho del pico de las aves",
x = "Ancho pico", y = "Alto pico")
```
Here we can see one of the advantages of ggplot over base R: `ggplot()` creates the legend automatically, whereas `plot()` does not. Looking at the `plot()` output we cannot tell which species the black or red points belong to, while `ggplot()` tells us explicitly that the <span style="color:red">red</span> points are <span style="color:red">*Catharus gracilirostris*</span> and the <span style="color:lightblue">light blue</span> ones are <span style="color:lightblue">*Chlorospingus pileatus*</span>.
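If we ever want the ggplot colors to be explicit rather than the defaults, one option is `scale_color_manual()`. The sketch below is only an optional illustration, and it assumes that the levels of `Especie` are the full species names:
```
aves %>% ggplot(aes(x = Ancho_pico, y = Alto_pico, col = Especie)) +
  geom_point() +
  scale_color_manual(values = c("Catharus gracilirostris" = "red",
                                "Chlorospingus pileatus" = "steelblue")) +
  labs(title = "Alto y ancho del pico de las aves",
       x = "Ancho pico", y = "Alto pico")
```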
To add a legend in base R, we need to use the `legend()` command, like this:
```
plot(aves$Ancho_pico, aves$Alto_pico, pch = 16,
col = aves$Especie,
xlab = "Ancho pico", ylab = "Alto pico",
main = "Alto y ancho del pico de las aves, separado según especie")
legend("bottomright",
legend = c("Catharus gracilirostris", "Chlorospingus pileatus"),
col = c("black", "red"),
pch = 16,
bty = "n")
```
where we first give the position of the legend in the plot, in this case `"bottomright"`, i.e., the bottom-right corner; `legend` gives the category names; `col` gives the point colors; `pch` is the point symbol for each category; and `bty = "n"` tells R not to draw a box around the legend.
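As an optional illustration of these arguments (not required for the lab), the same legend could instead be placed in the top-left corner and keep its default box simply by omitting `bty`:
```
plot(aves$Ancho_pico, aves$Alto_pico, pch = 16,
     col = aves$Especie,
     xlab = "Ancho pico", ylab = "Alto pico",
     main = "Alto y ancho del pico de las aves, separado según especie")
legend("topleft",
       legend = c("Catharus gracilirostris", "Chlorospingus pileatus"),
       col = c("black", "red"),
       pch = 16)
```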
As you can see, simply having to add a legend makes ggplot, which does it automatically, much easier to use than base R, where we have to add it explicitly and also need to know which color corresponds to which category, which is not always trivial.
---
#### Exercise (2 pts.)
a) Make the scatter plot of `Ancho_pico` (*X* axis) against `Alto_pico` (*Y* axis) again, but now with the points colored by the place where the observation was made (**Localidad**). **Remember the minimum guidelines required for presenting plots.**
```
# WRITE YOUR CODE HERE
testthat::fail() # No answer - delete this line if you wrote an answer
```
b) Does there appear to be any difference in the birds' bill width and height across localities? Based on what we saw when comparing these same traits by species, which of the two variables, species or locality, seems to show the larger differences in bill width and height?
WRITE YOUR ANSWER HERE
---
### Bar charts
Bar charts are used when we have one, or two, categorical variables and we want to show the counts for each category. For example, we could make a bar chart of the number of birds of each of the two species:
#### Base R
In base R, to make a bar chart we first need to build a table of the variable of interest using the `table()` command, in this case `Especie`:
```
table(aves$Especie)
```
This command returns the counts for each of the two species; that is, there are 39 individuals of *Catharus gracilirostris* and 58 of *Chlorospingus pileatus*. From this table we can make a bar chart with the `barplot()` command, as shown below:
```
barplot(table(aves$Especie))
```
In this chart, the *X* axis shows the two species, while the *Y* axis shows the counts, or frequencies, of individuals of each species.
Note also that, using pipes, we can write the previous command in a way that is easier to read:
```
aves$Especie %>% table() %>% barplot()
```
As before, we can add axis labels and a title to the chart with the same options as `plot()`:
```
aves$Especie %>% table() %>%
barplot(xlab = "Especie", ylab = "Frecuencia",
main = "Cantidad de individuos de cada especie")
```
#### `ggplot2`
To make a bar chart in ggplot, we just specify that the variable on the *X* axis is categorical and add `geom_bar()`, and we get a bar chart, as shown below:
```
aves %>% ggplot(aes(x = Especie)) +
geom_bar()
```
The result is the same as if we had used `barplot()`, except that we do not need to call `table()`, because ggplot computes those counts for us under the hood.
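As an optional aside, if we prefer to compute the counts ourselves, we can combine `dplyr::count()` with `geom_col()`, which draws bars from a precomputed *y* value; the result should be the same chart:
```
aves %>% count(Especie) %>%
  ggplot(aes(x = Especie, y = n)) +
  geom_col()
```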
As before, we can add labels for the *X* and *Y* axes, along with a title, to get something presentable:
```
aves %>% ggplot(aes(x = Especie)) +
geom_bar() +
labs(y = "Frecuencia",
title = "Cantidad de individuos de cada especie") +
theme_bw()
```
We added `theme_bw()`, which changes the canvas to look more like base R, adding a white background and a border around the plot, but this is not required; it is a matter of taste.
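`theme_bw()` is just one of several complete themes that ship with `ggplot2`; for example, `theme_minimal()` or `theme_classic()` can be swapped in the same way. A small optional illustration:
```
aves %>% ggplot(aes(x = Especie)) +
  geom_bar() +
  labs(y = "Frecuencia",
       title = "Cantidad de individuos de cada especie") +
  theme_minimal()
```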
---
#### Exercise (2 pts.)
Knowing that the four localities where the observations were made are **Cerro de la Muerte**, **Irazú**, **Barva** and **Poás**, respectively, do the following:
a) Create a bar chart of the frequencies of the variable `Localidad`, that is, each bar shows the number of birds observed at each of the localities. **Remember the minimum guidelines required for presenting plots.**
```
# WRITE YOUR CODE HERE
testthat::fail() # No answer - delete this line if you wrote an answer
```
b) Do there seem to be large differences in the number of birds across localities? Where were the most birds observed? Where were the fewest?
WRITE YOUR ANSWER HERE
---
## <span style="color: blue">Submitting the lab</span> <a class="tocSkip">
To submit this lab, download it by clicking **File**, then **Download as** and, finally, **Notebook (.ipynb)** in the top menu, below the Jupyter logo. You can also download it directly from the **Download** button in the quick-access icon bar.
Once you have located the downloaded file, rename it to your student ID, so that if your ID is **B12345**, the file should be called `B12345.ipynb`. After renaming the file, upload it to Mediación Virtual, in the section for this lab.
<a href="https://creativecommons.org/licenses/by-sa/4.0/">
<img src="https://mirrors.creativecommons.org/presskit/buttons/88x31/png/by-sa.png" width="88" height="31" />
</a>
```
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(42)
from environments import EnvironmentModel1 as Environment
from environments import translate_action_model1 as translate_action
from environments import reward_scaler_model1 as reward_scaler
from environments import STATES_MU1, STATES_SIGMA1
from environments.model1 import scale_states
from agents.new_doubledqnagent import DDQNAgent as Agent
size_multiplier = 0.7
FIGSIZE = (14*size_multiplier, 8*size_multiplier)
EPISODES = 300
lr = 0.0005
agent = Agent(gamma=0.99, epsilon=0.0, alpha=lr, input_dims=5,
n_actions=4, mem_size=1000000, batch_size=64, epsilon_dec=0.9999,
epsilon_end=0.01, state_scaler_mu = STATES_MU1,
state_scaler_sigma = STATES_SIGMA1)
agent.load_model('models//ddqn_model')
def random_action():
act = np.random.randint(0, 4)
return [0, 25, 37, 45][act]
```
# Beta_L = 2
```
parameters = {
'beta_L' : 2,
'S_min': 120.0,
'alpha': 4.609,
'eta_G': 0.164,
'eta_G_sq' : 0.015,
'delta': 0.209,
'sigma_epsilon': 15.11,
'omega': 3.5,
}
env = Environment(**parameters)
# Quick roll-out of the loaded agent to sanity-check cumulative returns per episode.
# NOTE: create_return_array is assumed to be defined elsewhere in the project;
# it is not imported in this notebook. beta_L is set to 4 for this run even though
# this section is headed Beta_L = 2.
results_sim = list()
total_score_sim = list()
for episode in range(200):
    if episode % 10 == 0:
        print('Episode:', episode)
    beta_L = 4
    state = env.reset(parameters={'beta_L': beta_L})
    done, G = False, 0
    while done is False:
        action = agent.choose_action(state)
        _action = translate_action(action)
        new_state, reward, done, info = env.step(_action)
        scaled_reward = reward_scaler(reward, beta_L)
        agent.epsilon = 0.0
        state = new_state
        G += scaled_reward
        results_sim.append(create_return_array(state, _action, scaled_reward, episode))
    total_score_sim.append(G)
    if episode % 50 == 0:
        plt.plot(total_score_sim)
        plt.plot(pd.Series(total_score_sim).rolling(50).mean())
        plt.show()
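# Evaluation roll-outs: 300 episodes each for the trained Double DQN agent and
# four benchmark policies (random hours, 0 hours, 37 hours, 45 hours). The raw
# per-period reward is logged so mean utility can later be compared by age (Q).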
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
action = agent.choose_action(state)
_action = translate_action(action)
next_state, reward, done, _ = env.step(_action)
scaled_reward = (reward - 24 ) / 5
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
#print("episode: {}/{}, score: {}, e: {:.2}".format(e, EPISODES, np.mean(rewards_history), agent.epsilon))
break
df_DQAgent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
df_DQAgent_action = pd.DataFrame(action_history, columns=['actions', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = random_action()
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_random_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 0
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_0_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 37
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_37_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 45
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_45_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
f, ax = plt.subplots(1, 1, figsize = FIGSIZE)
ax.plot(df_DQAgent.drop('episode', axis=1).groupby('Q').mean(), label='DQ Agent')
ax.plot(df_37_agent.drop('episode', axis=1).groupby('Q').mean(), label='37 hour Agent', ls='--')
ax.plot(df_45_agent.drop('episode', axis=1).groupby('Q').mean(), label='45 hour Agent', ls='--')
ax.plot(df_0_agent.drop('episode', axis=1).groupby('Q').mean(), label='0 hour Agent', ls='--')
ax.plot(df_random_agent.drop('episode', axis=1).groupby('Q').mean(), label='random Agent', ls='--')
ax.legend()
ax.set_xlabel('age')
ax.set_ylabel('utility')
ax.set_title('Double DQN simulations (Beta_L = 2)')
f.savefig('..//figures//ddqn_model1_beta_2_solution_benchmark_paths')
f, ax = plt.subplots(1, 1, figsize = FIGSIZE)
compare_df = df_45_agent
agent_name = 'Double DQN Agent'
benchmark_name = 'benchmark (45 hour agent)'
# Mean utility path of the trained agent with a ±1 standard deviation band
ax.plot(df_DQAgent.drop('episode', axis=1).groupby('Q').mean(), label=agent_name)
upper_bound = df_DQAgent.drop('episode', axis=1).groupby('Q').mean() + df_DQAgent.drop('episode', axis=1).groupby('Q').std()
lower_bound = df_DQAgent.drop('episode', axis=1).groupby('Q').mean() - df_DQAgent.drop('episode', axis=1).groupby('Q').std()
ax.fill_between(range(18, 61), upper_bound['rewards'], lower_bound['rewards'], alpha=0.3)
# Mean utility path of the benchmark agent with a ±1 standard deviation band
ax.plot(compare_df.drop('episode', axis=1).groupby('Q').mean(), label=benchmark_name)
upper_bound = compare_df.drop('episode', axis=1).groupby('Q').mean() + compare_df.drop('episode', axis=1).groupby('Q').std()
lower_bound = compare_df.drop('episode', axis=1).groupby('Q').mean() - compare_df.drop('episode', axis=1).groupby('Q').std()
ax.fill_between(range(18, 61), upper_bound['rewards'], lower_bound['rewards'], alpha=0.3)
plt.legend(loc='lower right')
ax.set_xlabel('age')
ax.set_ylabel('utility')
ax.set_title('Double DQN vs 45 Agent (beta_L = 2)')
f.savefig('..//figures//ddqn_model1_beta_2_solution_benchmark_variance')
```
# Beta_L = 4
```
parameters = {
'beta_L' : 4,
'S_min': 120.0,
'alpha': 4.609,
'eta_G': 0.164,
'eta_G_sq' : 0.015,
'delta': 0.209,
'sigma_epsilon': 15.11,
'omega': 3.5,
}
env = Environment(**parameters)
rewards_history, action_history = list(), list()
agent.epsilon=0.0
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
action = agent.choose_action(state)
_action = translate_action(action)
next_state, reward, done, _ = env.step(_action)
scaled_reward = (reward - 24 ) / 5
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
#print("episode: {}/{}, score: {}, e: {:.2}".format(e, EPISODES, np.mean(rewards_history), agent.epsilon))
break
df_DQAgent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
df_DQAgent_action = pd.DataFrame(action_history, columns=['actions', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = random_action()
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_random_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 0
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_0_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 37
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_37_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
rewards_history, action_history = list(), list()
for e in range(EPISODES):
env.reset()
state = env.states
for time in range(18, 500):
_action = 45
next_state, reward, done, _ = env.step(_action)
rewards_history.append([reward, e, time])
action_history.append([_action, e, time])
state = next_state
if done:
break
df_45_agent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
f, ax = plt.subplots(1, 1, figsize = FIGSIZE)
ax.plot(df_DQAgent.drop('episode', axis=1).groupby('Q').mean(), label='DQ Agent')
ax.plot(df_37_agent.drop('episode', axis=1).groupby('Q').mean(), label='37 hour Agent', ls='--')
ax.plot(df_45_agent.drop('episode', axis=1).groupby('Q').mean(), label='45 hour Agent', ls='--')
ax.plot(df_0_agent.drop('episode', axis=1).groupby('Q').mean(), label='0 hour Agent', ls='--')
ax.plot(df_random_agent.drop('episode', axis=1).groupby('Q').mean(), label='random Agent', ls='--')
ax.legend()
ax.set_xlabel('age')
ax.set_ylabel('utility')
ax.set_title('Double DQN simulations (Beta_L = 4)')
f.savefig('..//figures//ddqn_model1_beta_4_solution_benchmark_paths')
f, ax = plt.subplots(1, 1, figsize = FIGSIZE)
compare_df = df_0_agent
agent_name = 'Double DQN Agent'
benchmark_name = 'benchmark (0 hour agent)'
# Mean utility path of the trained agent with a ±1 standard deviation band
ax.plot(df_DQAgent.drop('episode', axis=1).groupby('Q').mean(), label=agent_name)
upper_bound = df_DQAgent.drop('episode', axis=1).groupby('Q').mean() + df_DQAgent.drop('episode', axis=1).groupby('Q').std()
lower_bound = df_DQAgent.drop('episode', axis=1).groupby('Q').mean() - df_DQAgent.drop('episode', axis=1).groupby('Q').std()
ax.fill_between(range(18, 61), upper_bound['rewards'], lower_bound['rewards'], alpha=0.3)
# Mean utility path of the benchmark agent with a ±1 standard deviation band
ax.plot(compare_df.drop('episode', axis=1).groupby('Q').mean(), label=benchmark_name)
upper_bound = compare_df.drop('episode', axis=1).groupby('Q').mean() + compare_df.drop('episode', axis=1).groupby('Q').std()
lower_bound = compare_df.drop('episode', axis=1).groupby('Q').mean() - compare_df.drop('episode', axis=1).groupby('Q').std()
ax.fill_between(range(18, 61), upper_bound['rewards'], lower_bound['rewards'], alpha=0.3)
plt.legend(loc='lower right')
ax.set_xlabel('age')
ax.set_ylabel('utility')
ax.set_title('Double DQN vs 0 hours Agent (beta_L = 4)')
f.savefig('..//figures//ddqn_model1_beta_4_solution_benchmark_variance')
```
## Configuration
_Initial steps to get the notebook ready to play nice with our repository. Do not delete this section._
Code formatting with [black](https://pypi.org/project/nb-black/).
```
%load_ext lab_black
import os
import pathlib
this_dir = pathlib.Path(os.path.abspath(""))
data_dir = this_dir / "data"
import pytz
import glob
import json
import requests
import pandas as pd
from datetime import datetime
```
## Download
Retrieve the case data from the county's ArcGIS REST endpoint
```
url = "https://gis.maderacounty.com/server/rest/services/COVID19/MC_COVID19/MapServer/1/query?where=1%3D1&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=MCGIS.DBO.PH_COVID19_REPORTING_AREAS.AREA%2C+T_CASES%2C+DECEASED&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&having=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&historicMoment=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentOnly=false&datumTransformation=¶meterValues=&rangeValues=&quantizationParameters=&f=pjson"
r = requests.get(url)
data = r.json()
```
## Parse
```
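# Flatten the ArcGIS "features" list into one row per reporting area, keeping
# the area name and its cumulative confirmed case count.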
dict_list = []
for item in data["features"]:
d = dict(
county="Madera",
area=item["attributes"]["MCGIS.DBO.PH_COVID19_REPORTING_AREAS.AREA"],
confirmed_cases=item["attributes"]["T_CASES"],
)
dict_list.append(d)
df = pd.DataFrame(dict_list)
```
Get timestamp
```
date_url = "https://gis.maderacounty.com/server/rest/services/COVID19/MC_COVID19/MapServer/2/query?where=1%3D1&text=&objectIds=&time=&geometry=&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=DATE_UPDATED&returnGeometry=false&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&having=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&historicMoment=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentOnly=false&datumTransformation=¶meterValues=&rangeValues=&quantizationParameters=&f=pjson"
date_r = requests.get(date_url)
date_data = date_r.json()
date = date_data["features"][0]["attributes"]["DATE_UPDATED"]
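# DATE_UPDATED is parsed here as returned by the service; if the field were an
# epoch timestamp in milliseconds (common for ArcGIS layers), it would need
# pd.to_datetime(date, unit="ms") instead.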
latest_date = pd.to_datetime(date).date()
df["county_date"] = latest_date
```
## Vet
```
try:
assert not len(df) > 6
except AssertionError:
raise AssertionError("Madera's area scraper has extra rows")
try:
assert not len(df) < 6
except AssertionError:
raise AssertionError("Madera's area scraper is missing rows")
```
## Export
Set date
```
tz = pytz.timezone("America/Los_Angeles")
today = datetime.now(tz).date()
slug = "madera"
df.to_csv(data_dir / slug / f"{today}.csv", index=False)
```
## Combine
```
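# Stitch every daily per-area CSV (plus any manually entered files) into a
# single timeseries.csv, taking the date for automated files from the filename.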
csv_list = [
i
for i in glob.glob(str(data_dir / slug / "*.csv"))
if not str(i).endswith("timeseries.csv")
]
df_list = []
for csv in csv_list:
if "manual" in csv:
df = pd.read_csv(csv, parse_dates=["date"])
else:
file_date = csv.split("/")[-1].replace(".csv", "")
df = pd.read_csv(csv, parse_dates=["county_date"])
df["date"] = file_date
df_list.append(df)
df = pd.concat(df_list).sort_values(["date", "area"])
df.to_csv(data_dir / slug / "timeseries.csv", index=False)
```